ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3 to 1.04M) |
---|---|---|
py | b400be6b8e56ed12413b7e7855726f6651b1eb65 |
import os
import random
from selenium import webdriver
from .constants import PROXIES
class ProxiesMiddleware:
def __init__(self):
# self.proxies = RealTimeProxies().list()
self.proxies = PredefinedProxies().list()
def process_request(self, request, spider):
request.meta['proxy'] = random.choice(self.proxies)
class RealTimeProxies:
"""Get a US HTTPS proxies list in real-time
Currently, the data is retrieved from proxydb.net, but
other sites could be integrated similarly. This is necessary
to have a fresh list of US-based proxies that we can use
to scrape the rest of the product data.
"""
url = 'http://proxydb.net/?protocol=https&country=US'
table_css_selector = (
'body > div > div.table-responsive ' +
'> table > tbody > tr'
)
def __init__(self):
options = webdriver.ChromeOptions()
options.add_argument('headless')
self.driver = webdriver.Chrome(
'{}/../utilities/chromedriver'.format(
os.path.dirname(os.path.realpath(__file__))),
chrome_options=options
)
def list(self):
print('-' * 100)
print('GETTING FRESH LIST OF PROXIES...')
self.driver.get(self.url)
rows = self.driver.find_elements_by_css_selector(
self.table_css_selector
)
proxies = []
for row in rows:
proxies.append('http://{}'.format(
row.find_elements_by_tag_name("td")[0].text))
print(proxies)
print('-' * 100)
return proxies
class PredefinedProxies:
"""Use a predefined set of proxies
Semi-dedicated proxies from: https://blazingseollc.com/
"""
def __init__(self):
self.proxies = PROXIES
def list(self):
return self.proxies
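# Hedged usage sketch (not part of the original file): in a Scrapy project this
# middleware is typically enabled from settings.py; the dotted path and priority
# below are placeholders for wherever this module actually lives.
#
#   DOWNLOADER_MIDDLEWARES = {
#       "myproject.middlewares.ProxiesMiddleware": 750,
#   }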
|
py | b400beb738b4926e30a5fb3f22844af67e7d8220 | import numpy as np
from scipy.optimize import minimize
import emcee
from comancpipeline.Tools import stats
from tqdm import tqdm
# FUNCTION FOR FITTING ROUTINES
class Gauss2dRot:
def __init__(self):
self.__name__ = Gauss2dRot.__name__
def __call__(self,*args,**kwargs):
A, x0, sigx, y0, sigy, phi, B = args[0]
if (sigx <= 0) | (sigy <= 0):
return np.inf
if (phi <= -2*np.pi) | (phi >=2*np.pi):
return np.inf
return self.func(*args,**kwargs)
def func(self,P, xy):
x,y = xy
A, x0, sigx, y0, sigy, phi, B = P
Xr = (x - x0)/sigx * np.cos(phi) + (y-y0)/sigx * np.sin(phi)
Yr =-(x - x0)/sigy * np.sin(phi) + (y-y0)/sigy * np.cos(phi)
model = A * np.exp( - 0.5 * (Xr**2 + Yr**2)) + B
return model
def deriv(self,P,xy):
x,y = xy
A, x0, sigx, y0, sigy, phi, B = P
Xr = (x - x0)/sigx * np.cos(phi) + (y-y0)/sigx * np.sin(phi)
Yr =-(x - x0)/sigy * np.sin(phi) + (y-y0)/sigy * np.cos(phi)
r2 = (Xr**2 + Yr**2)
m = np.exp(-0.5 * r2)
# Constants for offset derivatives
Cx = (np.cos(phi)**2/sigx**2 + np.sin(phi)**2/sigy**2)
Cy = (np.cos(phi)**2/sigy**2 + np.sin(phi)**2/sigx**2)
Cr = (np.sin(2*phi) /sigx**2 - np.sin(2*phi) /sigy**2)
# Constants for sigma derivatives
Zx = (x-x0)**2 * np.cos(phi)**2 + (y-y0)**2 * np.sin(phi)**2 + (x-x0) * (y-y0) * np.sin(2*phi)
Zy = (x-x0)**2 * np.sin(phi)**2 + (y-y0)**2 * np.cos(phi)**2 - (x-x0) * (y-y0) * np.sin(2*phi)
# Constant for the rotation derivative
Rc = 0.5 * ((x-x0)**2-(y-y0)**2) * ( np.sin(2*phi)/sigx**2 - np.sin(2*phi)/sigy**2) - (x-x0) * (y-y0) * np.cos(2*phi) * (1./sigx**2 - 1./sigy**2)
output= np.array([m,
A * m * (Cx * (x - x0) + 0.5*(y-y0) * Cr) , # offset x
A * m / sigx**3 * Zx, # sigma x
A * m * (Cy * (y - y0) + 0.5*(x-x0) * Cr) , # offset y
A * m / sigy**3 * Zy, # sigma y
A * m * Rc, # rotation angle
np.ones(m.size)])
return np.transpose(output)
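# Hedged sanity-check sketch (not part of the original module): the analytic
# Jacobian returned by Gauss2dRot.deriv can be validated against central finite
# differences of Gauss2dRot.func. The grid, test point and tolerances below are
# arbitrary illustrative choices.
def _check_gauss2drot_deriv(eps=1e-6, rtol=1e-4, atol=1e-8):
    model = Gauss2dRot()
    xy = (np.linspace(-1, 1, 5), np.linspace(-1, 1, 5))
    P = np.array([1.0, 0.1, 0.5, -0.1, 0.7, 0.2, 0.05])  # A, x0, sigx, y0, sigy, phi, B
    analytic = model.deriv(P, xy)  # shape (npoints, nparams)
    numeric = np.zeros_like(analytic)
    for i in range(P.size):
        Pp, Pm = P.copy(), P.copy()
        Pp[i] += eps
        Pm[i] -= eps
        numeric[:, i] = (model.func(Pp, xy) - model.func(Pm, xy)) / (2 * eps)
    return np.allclose(analytic, numeric, rtol=rtol, atol=atol)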
class Gauss2dRot_Gradient:
def __init__(self):
self.__name__ = Gauss2dRot_Gradient.__name__
def __call__(self,*args,**kwargs):
A, x0, sigx, y0, sigy, phi, B, Gx, Gy, Gxy = args[0]
return self.func(*args,**kwargs)
def func(self,P, xy):
x,y = xy
A, x0, sigx, y0, sigy, phi, B, Gx, Gy, Gxy = P
Xr = (x - x0)/sigx * np.cos(phi) + (y-y0)/sigx * np.sin(phi)
Yr =-(x - x0)/sigy * np.sin(phi) + (y-y0)/sigy * np.cos(phi)
model = A * np.exp( - 0.5 * (Xr**2 + Yr**2)) + B + Gx*(x-x0) + Gy*(y-y0) + Gxy*(x-x0)*(y-y0)
return model
def deriv(self,P,xy):
x,y = xy
A, x0, sigx, y0, sigy, phi, B = P[:7]  # only the Gaussian parameters are differentiated here
Xr = (x - x0)/sigx * np.cos(phi) + (y-y0)/sigx * np.sin(phi)
Yr =-(x - x0)/sigy * np.sin(phi) + (y-y0)/sigy * np.cos(phi)
r2 = (Xr**2 + Yr**2)
m = np.exp(-0.5 * r2)
# Constants for offset derivatives
Cx = (np.cos(phi)**2/sigx**2 + np.sin(phi)**2/sigy**2)
Cy = (np.cos(phi)**2/sigy**2 + np.sin(phi)**2/sigx**2)
Cr = (np.sin(2*phi) /sigx**2 - np.sin(2*phi) /sigy**2)
# Constants for sigma derivatives
Zx = (x-x0)**2 * np.cos(phi)**2 + (y-y0)**2 * np.sin(phi)**2 + (x-x0) * (y-y0) * np.sin(2*phi)
Zy = (x-x0)**2 * np.sin(phi)**2 + (y-y0)**2 * np.cos(phi)**2 - (x-x0) * (y-y0) * np.sin(2*phi)
# Constant for the rotation derivative
Rc = 0.5 * ((x-x0)**2-(y-y0)**2) * ( np.sin(2*phi)/sigx**2 - np.sin(2*phi)/sigy**2) - (x-x0) * (y-y0) * np.cos(2*phi) * (1./sigx**2 - 1./sigy**2)
output= np.array([m,
A * m * (Cx * (x - x0) + 0.5*(y-y0) * Cr) , # offset x
A * m / sigx**3 * Zx, # sigma x
A * m * (Cy * (y - y0) + 0.5*(x-x0) * Cr) , # offset y
A * m / sigy**3 * Zy, # sigma y
A * m * Rc, # rotation angle
np.ones(m.size)])
return np.transpose(output)
class Gauss2dRot_Gradient2:
def __init__(self):
self.__name__ = Gauss2dRot_Gradient2.__name__
def __call__(self,*args,**kwargs):
A, x0, sigx, y0, sigy, phi, B, Gx, Gy, Gxy,Gx2,Gy2,Gxy2,Gx2y,Gx2y2 = args[0]
return self.func(*args,**kwargs)
def func(self,P, xy):
x,y = xy
A, x0, sigx, y0, sigy, phi, B, Gx, Gy, Gxy,Gx2,Gy2,Gxy2,Gx2y,Gx2y2 = P
Xr = (x - x0)/sigx * np.cos(phi) + (y-y0)/sigx * np.sin(phi)
Yr =-(x - x0)/sigy * np.sin(phi) + (y-y0)/sigy * np.cos(phi)
poly = Gx*(x-x0) + Gy*(y-y0) + Gxy*(x-x0)*(y-y0) + Gx2*(x-x0)**2 + Gy2*(y-y0)**2 + Gxy2*(x-x0)*(y-y0)**2 + Gx2y*(x-x0)**2*(y-y0) + Gx2y2*(x-x0)**2*(y-y0)**2
model = A * np.exp( - 0.5 * (Xr**2 + Yr**2)) + B + poly
return model
def deriv(self,P,xy):
x,y = xy
A, x0, sigx, y0, sigy, phi, B = P[:7]  # only the Gaussian parameters are differentiated here
Xr = (x - x0)/sigx * np.cos(phi) + (y-y0)/sigx * np.sin(phi)
Yr =-(x - x0)/sigy * np.sin(phi) + (y-y0)/sigy * np.cos(phi)
r2 = (Xr**2 + Yr**2)
m = np.exp(-0.5 * r2)
# Constants for offset derivatives
Cx = (np.cos(phi)**2/sigx**2 + np.sin(phi)**2/sigy**2)
Cy = (np.cos(phi)**2/sigy**2 + np.sin(phi)**2/sigx**2)
Cr = (np.sin(2*phi) /sigx**2 - np.sin(2*phi) /sigy**2)
# Constants for sigma derivatives
Zx = (x-x0)**2 * np.cos(phi)**2 + (y-y0)**2 * np.sin(phi)**2 + (x-x0) * (y-y0) * np.sin(2*phi)
Zy = (x-x0)**2 * np.sin(phi)**2 + (y-y0)**2 * np.cos(phi)**2 - (x-x0) * (y-y0) * np.sin(2*phi)
# Constant for the rotation derivative
Rc = 0.5 * ((x-x0)**2-(y-y0)**2) * ( np.sin(2*phi)/sigx**2 - np.sin(2*phi)/sigy**2) - (x-x0) * (y-y0) * np.cos(2*phi) * (1./sigx**2 - 1./sigy**2)
output= np.array([m,
A * m * (Cx * (x - x0) + 0.5*(y-y0) * Cr) , # offset x
A * m / sigx**3 * Zx, # sigma x
A * m * (Cy * (y - y0) + 0.5*(x-x0) * Cr) , # offset y
A * m / sigy**3 * Zy, # sigma y
A * m * Rc, # rotation angle
np.ones(m.size)])
return np.transpose(output)
class Gauss2dRot_FixedPos:
def __init__(self):
self.__name__ = Gauss2dRot_FixedPos.__name__
def __call__(self,*args,**kwargs):
A, sigx, sigy, B = args[0]
if (sigx <= 0) | (sigy <= 0):
return 1e32
return self.func(*args,**kwargs)
def func(self,P, xy, x0=0, y0=0, phi=0):
x,y = xy
A, sigx, sigy, B = P
Xr = (x - x0)/sigx * np.cos(phi) + (y-y0)/sigx * np.sin(phi)
Yr =-(x - x0)/sigy * np.sin(phi) + (y-y0)/sigy * np.cos(phi)
model = A * np.exp( - 0.5 * (Xr**2 + Yr**2)) + B
return model
def deriv(self,P,xy, x0=0, y0=0, phi=0):
x,y = xy
A, sigx, sigy, B = P
Xr = (x - x0)/sigx * np.cos(phi) + (y-y0)/sigx * np.sin(phi)
Yr =-(x - x0)/sigy * np.sin(phi) + (y-y0)/sigy * np.cos(phi)
r2 = (Xr**2 + Yr**2)
m = np.exp(-0.5 * r2)
# Constants for offset derivatives
Cx = (np.cos(phi)**2/sigx**2 + np.sin(phi)**2/sigy**2)
Cy = (np.cos(phi)**2/sigy**2 + np.sin(phi)**2/sigx**2)
Cr = (np.sin(2*phi) /sigx**2 - np.sin(2*phi) /sigy**2)
# Constants for sigma derivatives
Zx = (x-x0)**2 * np.cos(phi)**2 + (y-y0)**2 * np.sin(phi)**2 + (x-x0) * (y-y0) * np.sin(2*phi)
Zy = (x-x0)**2 * np.sin(phi)**2 + (y-y0)**2 * np.cos(phi)**2 - (x-x0) * (y-y0) * np.sin(2*phi)
# Constant for the rotation derivative
Rc = 0.5 * ((x-x0)**2-(y-y0)**2) * ( np.sin(2*phi)/sigx**2 - np.sin(2*phi)/sigy**2) - (x-x0) * (y-y0) * np.cos(2*phi) * (1./sigx**2 - 1./sigy**2)
output= np.array([m,
A * m / sigx**3 * Zx, # sigma x
A * m / sigy**3 * Zy, # sigma y
np.ones(m.size)])
return np.transpose(output)
def auto_comps(self,f,y,d1,d2):
return (0.5*np.sum(d1**2) + np.sum(f*d2) - np.sum(y*d2))
def cross_comps(self,f,y,d1a,d1b,d2):
return (0.5*np.sum(d1a*d1b) + np.sum(f*d2) - np.sum(y*d2))
def covariance(self,P,xy,z,e, x0=0, y0=0, phi=0):
"""
"""
x,y = xy
A, sigx, sigy, B = P
X = (x - x0) * np.cos(phi) + (y-y0) * np.sin(phi)
Y =-(x - x0) * np.sin(phi) + (y-y0) * np.cos(phi)
deriv = {'dA' : self.func(P,xy,x0,y0,phi)/e,
'dA2' : 0.,
'dB' : np.ones(x.size)/e,
'dB2' : 0.,
'dSigX' : self.func(P,xy,x0,y0,phi)*X**2/sigx**3/e,
'dSigY' : self.func(P,xy,x0,y0,phi)*Y**2/sigy**3/e}
deriv['dSigX2'] = deriv['dSigX']*(X**2/sigx**3 - 3./sigx)
deriv['dSigY2'] = deriv['dSigY']*(Y**2/sigy**3 - 3./sigy)
deriv['dSigXSigY'] = deriv['dSigX']*deriv['dSigY']/self.func(P,xy,x0,y0,phi)*e # to cancel the double instance of the uncertainty
deriv['dASigX'] = deriv['dSigX']/A
deriv['dASigY'] = deriv['dSigY']/A
c = {'00':self.auto_comps (self.func(P,xy,x0,y0,phi), z, deriv['dA'] , deriv['dA2']) , # AUTO
'10':self.cross_comps(self.func(P,xy,x0,y0,phi), z, deriv['dA'] , deriv['dSigX'] , deriv['dASigX']),
'30':self.cross_comps(self.func(P,xy,x0,y0,phi), z, deriv['dB'] , deriv['dA'] , 0),
'31':self.cross_comps(self.func(P,xy,x0,y0,phi), z, deriv['dB'] , deriv['dSigX'] , 0),
'32':self.cross_comps(self.func(P,xy,x0,y0,phi), z, deriv['dB'] , deriv['dSigY'] , 0),
'11':self.auto_comps (self.func(P,xy,x0,y0,phi), z, deriv['dSigX'], deriv['dSigX2']), # AUTO
'20':self.cross_comps(self.func(P,xy,x0,y0,phi), z, deriv['dA'] , deriv['dSigY'] , deriv['dASigY']),
'21':self.cross_comps(self.func(P,xy,x0,y0,phi), z, deriv['dSigX'], deriv['dSigY'] , deriv['dSigXSigY']),
'22':self.auto_comps (self.func(P,xy,x0,y0,phi), z, deriv['dSigY'], deriv['dSigY2']), # AUTO
'33':self.auto_comps (self.func(P,xy,x0,y0,phi), z, deriv['dB'] , deriv['dB2'])} # AUTO
V = np.array([[c['00'],c['10'],c['20'],c['30']],
[c['10'],c['11'],c['21'],c['31']],
[c['20'],c['21'],c['22'],c['32']],
[c['30'],c['31'],c['32'],c['33']]])
C = np.linalg.inv(V)
return np.sqrt(np.diag(C))
class Gauss2dRot_Gradient_FixedPos:
def __init__(self):
self.__name__ = Gauss2dRot_Gradient_FixedPos.__name__
def __call__(self,*args,**kwargs):
A, sigx, sigy, B, Gx, Gy, Gxy = args[0]
if (sigx <= 0) | (sigy <= 0):
return np.inf
return self.func(*args,**kwargs)
def func(self,P, xy, x0=0, y0=0, phi=0):
x,y = xy
A, sigx, sigy, B, Gx, Gy, Gxy = P
Xr = (x - x0)/sigx * np.cos(phi) + (y-y0)/sigx * np.sin(phi)
Yr =-(x - x0)/sigy * np.sin(phi) + (y-y0)/sigy * np.cos(phi)
model = A * np.exp( - 0.5 * (Xr**2 + Yr**2)) + B + Gx*(x-x0) + Gy*(y-y0) + Gxy*(x-x0)*(y-y0)
return model
def deriv(self,P,xy, x0=0, y0=0, phi=0):
x,y = xy
A, sigx, sigy, B = P[:4]  # the gradient terms are not differentiated here
Xr = (x - x0)/sigx * np.cos(phi) + (y-y0)/sigx * np.sin(phi)
Yr =-(x - x0)/sigy * np.sin(phi) + (y-y0)/sigy * np.cos(phi)
r2 = (Xr**2 + Yr**2)
m = np.exp(-0.5 * r2)
# Constants for offset derivatives
Cx = (np.cos(phi)**2/sigx**2 + np.sin(phi)**2/sigy**2)
Cy = (np.cos(phi)**2/sigy**2 + np.sin(phi)**2/sigx**2)
Cr = (np.sin(2*phi) /sigx**2 - np.sin(2*phi) /sigy**2)
# Constants for sigma derivatives
Zx = (x-x0)**2 * np.cos(phi)**2 + (y-y0)**2 * np.sin(phi)**2 + (x-x0) * (y-y0) * np.sin(2*phi)
Zy = (x-x0)**2 * np.sin(phi)**2 + (y-y0)**2 * np.cos(phi)**2 - (x-x0) * (y-y0) * np.sin(2*phi)
# Constant for the rotation derivative
Rc = 0.5 * ((x-x0)**2-(y-y0)**2) * ( np.sin(2*phi)/sigx**2 - np.sin(2*phi)/sigy**2) - (x-x0) * (y-y0) * np.cos(2*phi) * (1./sigx**2 - 1./sigy**2)
output= np.array([m,
A * m / sigx**3 * Zx, # sigma x
A * m / sigy**3 * Zy, # sigma y
np.ones(m.size)])
return np.transpose(output)
def auto_comps(self,f,y,d1,d2):
return (0.5*np.sum(d1**2) + np.sum(f*d2) - np.sum(y*d2))
def cross_comps(self,f,y,d1a,d1b,d2):
return (0.5*np.sum(d1a*d1b) + np.sum(f*d2) - np.sum(y*d2))
def covariance(self,P,xy,z,e, x0=0, y0=0, phi=0):
"""
"""
x,y = xy
A, sigx, sigy, B = P[:4]  # only the Gaussian parameters enter the covariance estimate
X = (x - x0) * np.cos(phi) + (y-y0) * np.sin(phi)
Y =-(x - x0) * np.sin(phi) + (y-y0) * np.cos(phi)
deriv = {'dA' : self.func(P,xy,x0,y0,phi)/e,
'dA2' : 0.,
'dB' : np.ones(x.size)/e,
'dB2' : 0.,
'dSigX' : self.func(P,xy,x0,y0,phi)*X**2/sigx**3/e,
'dSigY' : self.func(P,xy,x0,y0,phi)*Y**2/sigy**3/e}
deriv['dSigX2'] = deriv['dSigX']*(X**2/sigx**3 - 3./sigx)
deriv['dSigY2'] = deriv['dSigY']*(Y**2/sigy**3 - 3./sigy)
deriv['dSigXSigY'] = deriv['dSigX']*deriv['dSigY']/self.func(P,xy,x0,y0,phi)*e # to cancel the double instance of the uncertainty
deriv['dASigX'] = deriv['dSigX']/A
deriv['dASigY'] = deriv['dSigY']/A
c = {'00':self.auto_comps (self.func(P,xy,x0,y0,phi), z, deriv['dA'] , deriv['dA2']) , # AUTO
'10':self.cross_comps(self.func(P,xy,x0,y0,phi), z, deriv['dA'] , deriv['dSigX'] , deriv['dASigX']),
'30':self.cross_comps(self.func(P,xy,x0,y0,phi), z, deriv['dB'] , deriv['dA'] , 0),
'31':self.cross_comps(self.func(P,xy,x0,y0,phi), z, deriv['dB'] , deriv['dSigX'] , 0),
'32':self.cross_comps(self.func(P,xy,x0,y0,phi), z, deriv['dB'] , deriv['dSigY'] , 0),
'11':self.auto_comps (self.func(P,xy,x0,y0,phi), z, deriv['dSigX'], deriv['dSigX2']), # AUTO
'20':self.cross_comps(self.func(P,xy,x0,y0,phi), z, deriv['dA'] , deriv['dSigY'] , deriv['dASigY']),
'21':self.cross_comps(self.func(P,xy,x0,y0,phi), z, deriv['dSigX'], deriv['dSigY'] , deriv['dSigXSigY']),
'22':self.auto_comps (self.func(P,xy,x0,y0,phi), z, deriv['dSigY'], deriv['dSigY2']), # AUTO
'33':self.auto_comps (self.func(P,xy,x0,y0,phi), z, deriv['dB'] , deriv['dB2'])} # AUTO
V = np.array([[c['00'],c['10'],c['20'],c['30']],
[c['10'],c['11'],c['21'],c['31']],
[c['20'],c['21'],c['22'],c['32']],
[c['30'],c['31'],c['32'],c['33']]])
C = np.linalg.inv(V)
return np.sqrt(np.diag(C))
class Gauss2dRot_General:
def __init__(self,defaults={'x0':0,'y0':0,'sigx':0,'sigy_scale':1,'phi':0,'A':0,'B':0},
fixed=[],
use_bootstrap=False,
use_leastsqs =False):
self.__name__ = Gauss2dRot_General.__name__
self.use_bootstrap = use_bootstrap
self.use_leastsqs = use_leastsqs
fixed = {k:True for k in fixed}
self.set_fixed(**fixed)
self.defaults = defaults
self.param_names = ['A','x0','sigx','y0','sigy_scale','phi','B']
self.A = 0
self.B = 0
self.sigx = 0
self.sigy_scale = 1
self.x0 = 0
self.y0 = 0
self.phi = 0
self.idx = {k:i for i,k in enumerate(self.param_names)}
def limfunc(self,P):
lims = {'A':lambda P: False,
'B':lambda P: False,
'x0':lambda P: False,
'y0':lambda P: False,
'phi':lambda P: (P['phi'] < -np.pi/2.) | (P['phi'] > np.pi/2.),
'sigx': lambda P: (P['sigx'] < 0),
'sigy_scale': lambda P: (P['sigy_scale'] < 1) | (P['sigy_scale'] > 10)}
params = self.get_param_names()
Pz = {k:p for k,p in zip(params,P)}
lims = [lims[k](Pz) for k in params]
return any(lims)
def get_param_names(self):
return [p for p in self.param_names if not self.fixed[p]]
def __call__(self,P0_dict,xy,z,covariance,P0_priors={},
limfunc=None, nwalkers=32, samples=5000, discard=100,thin=15,return_array=False):
normalise = 1#np.max(z)
z /= normalise
covariance /= normalise**2
self.P0_priors = P0_priors
if isinstance(limfunc,type(None)):
self.limfunc = self.limfunc
else:
self.limfunc = limfunc
P0 = [v for k,v in P0_dict.items()]
self.idx= {k:i for i, k in enumerate(P0_dict.keys())}
if self.use_bootstrap:
self.niter = 100
results = np.zeros((self.niter,self.nparams))
for i in tqdm(range(self.niter)):
sel = np.random.uniform(low=0,high=z.size,size=z.size).astype(int)
results[i] = minimize(self.minimize_errfunc,P0,args=((xy[0][sel],xy[1][sel]),z[sel],covariance[sel]),method='CG').x
error = stats.MAD(results,axis=0)
result = np.nanmedian(results,axis=0)
elif self.use_leastsqs:
result = minimize(self.minimize_errfunc,P0,args=(xy,z,covariance),method='CG').x
error = result*0.
flat_samples = np.zeros(1)
min_chi2 = self.emcee_errfunc(result,xy,z,covariance)
ddof = len(z)
else:
# Perform the least-squares fit
result = minimize(self.minimize_errfunc,P0,args=(xy,z,covariance),method='Nelder-Mead')
pos = result.x + 1e-4 * np.random.normal(size=(nwalkers, len(result.x)))
sampler = emcee.EnsembleSampler(nwalkers,len(result.x),self.emcee_errfunc,
args=(xy,z,covariance))
sampler.run_mcmc(pos,samples,progress=True)
flat_samples = sampler.get_chain(discard=discard,thin=thin,flat=True)
result = np.nanmedian(flat_samples,axis=0)
error = stats.MAD(flat_samples ,axis=0)
zchi = ((flat_samples - result[None,:])/error[None,:])**2
zchi = np.max(zchi,axis=1)
gd = (zchi < 15)
flat_samples = flat_samples[gd,:]
result = np.nanmedian(flat_samples,axis=0)
error = stats.MAD(flat_samples ,axis=0)
min_chi2 = self.emcee_errfunc(result,xy,z,covariance)
ddof = len(z)
z *= normalise
covariance *= normalise**2
Value_dict = {k:result[i] for k, i in self.idx.items()}
Error_dict = {k:error[i] for k, i in self.idx.items()}
for k in ['A','B']:
Value_dict[k] *= normalise
Error_dict[k] *= normalise
if return_array:
return result, error, flat_samples, min_chi2, ddof
else:
return Value_dict, Error_dict, flat_samples, min_chi2, ddof
def Priors(self,P):
prior = 0
for k,v in self.P0_priors.items():
prior += (P[self.idx[k]]-self.P0_priors[k]['mean'])**2/self.P0_priors[k]['width']**2
return prior
def set_fixed(self,**kwargs):
self.fixed = {'x0':False,'y0':False,'sigx':False,'sigy_scale':False,'phi':False,'A':False,'B':False}
for k,v in kwargs.items():
if not k in self.fixed:
raise KeyError('Key not in self.fixed')
self.fixed[k] = v
self.nparams = 0
for k,v in self.fixed.items():
if not v:
self.nparams += 1
assert self.nparams > 0, 'All parameters fixed?'
def set_defaults(self,**kwargs):
for k,v in kwargs.items():
self.defaults[k] = v
def emcee_errfunc(self,P,xy,z,cov):
if self.limfunc(P):
return -1e32
else:
#print(np.sum(self.Priors(P)))
chi2 = -np.sum( (self.func(P,xy) - z)**2/cov ) #- self.Priors(P)
if np.isfinite(chi2):
return chi2
else:
return -1e32
def minimize_errfunc(self,P,xy,z,cov):
if self.limfunc(P):
return 1e32
else:
return np.sum( (self.func(P,xy) - z)**2/cov )
def func(self,P, xy):
x,y = xy
for parameter in self.param_names:
if parameter in self.idx:
self.__dict__[parameter] = P[self.idx[parameter]]
else:
self.__dict__[parameter] = self.defaults[parameter]
if self.fixed['sigy_scale']:
sigy = self.sigx
else:
sigy = self.sigx*self.sigy_scale
Xr = (x - self.x0)/self.sigx * np.cos(self.phi) + (y-self.y0)/self.sigx * np.sin(self.phi)
Yr =-(x - self.x0)/sigy * np.sin(self.phi) + (y-self.y0)/sigy * np.cos(self.phi)
model = self.A * np.exp( - 0.5 * (Xr**2 + Yr**2)) + self.B
return model
# Error Lstsq
def ErrorLstSq(*args):
P = args[0]
func = args[1][0]
limits = args[1][1]
#z = args[5]
xy, z,cov, otherkeys = args[1][2:]
if limits(P):
return 0.*z + 1e32
else:
return np.sum( (func(P,xy, **otherkeys) - z)**2/cov )
def MC_ErrorLstSq(P,*args):
#P = args
func = args[0]
limits = args[1]
#z = args[5]
xy, z,cov, otherkeys = args[2:]
if limits(P):
return -1e32
else:
#print(np.sum((z - func(P,x,y,ra0,dec0, **kwargs))**2),flush=True)
return -np.sum( (func(P,xy, **otherkeys) - z)**2/cov )
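# Hedged usage sketch (not part of the original module): fit a simulated source
# with Gauss2dRot_General on the pure least-squares path. The grid, noise level
# and starting values below are arbitrary illustrative choices.
if __name__ == "__main__":
    np.random.seed(0)
    x = np.random.uniform(-3, 3, 2000)
    y = np.random.uniform(-3, 3, 2000)
    z = 1.0 * np.exp(-0.5 * ((x - 0.2)**2 + (y + 0.1)**2) / 0.8**2) + 0.01 * np.random.normal(size=x.size)
    cov = np.full(x.size, 0.01**2)  # per-sample noise variance
    fitter = Gauss2dRot_General(fixed=['phi', 'sigy_scale'], use_leastsqs=True)
    P0 = {'A': 0.5, 'x0': 0.0, 'sigx': 1.0, 'y0': 0.0, 'B': 0.0}
    values, errors, _, chi2, ddof = fitter(P0, (x, y), z, cov)
    print(values)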
|
py | b400bf15966b335df755eef6ce72a49b3fbd44c0 | # Copyright (c) 2018, NVIDIA CORPORATION.
import os
import sysconfig
from distutils.sysconfig import get_python_lib
import numpy as np
import versioneer
from Cython.Build import cythonize
from setuptools import find_packages, setup
from setuptools.extension import Extension
install_requires = ["numba", "cython"]
cython_files = ["cuspatial/bindings/**/*.pyx"]
extensions = [
Extension(
"*",
sources=cython_files,
include_dirs=[
"../../cpp/include/cuspatial",
os.environ['CONDA_PREFIX']+"/include/cudf",
os.path.dirname(sysconfig.get_path("include")),
np.get_include()
],
library_dirs=[get_python_lib()],
libraries=["cudf","cuspatial"],
language="c++",
extra_compile_args=["-std=c++14"]
)
]
setup(
name="cuspatial",
version=versioneer.get_version(),
description="cuSpatial: GPU-Accelerated Spatial and Trajectory Data Management and Analytics Library",
url="https://github.com/rapidsai/cuspatial",
author="NVIDIA Corporation",
license="Apache 2.0",
classifiers=[
"Intended Audience :: Developers",
"Topic :: Database",
"Topic :: Scientific/Engineering",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
# Include the separately-compiled shared library
setup_requires=["cython"],
ext_modules=cythonize(extensions),
packages=find_packages(include=["cuspatial", "cuspatial.*"]),
cmdclass=versioneer.get_cmdclass(),
install_requires=install_requires,
zip_safe=False,
)
|
py | b400c053021fc6e12c3d4e4e3b68aa3cac4d9b66 | # IMPORT THE CREATED OBJECTS #
from screenManager import Screen, Button
from inputBox import Inputbox
from simElements import Bolinha, Timer
import pygame as pg
import numpy as np
import pandas as pd
import operator
# GLOBAL VARIABLES #
# Pygame is initialized here and all the variables used are defined, such as:
# window width and height, the screens (parameter menu, simulation and results display),
# input boxes so the user can enter values, buttons, timer, etc.
pg.init()
pg.font.init()
w_w = 1024
w_h = 768
FONT = pg.font.Font(None, 32)
time = 10
done = False
colours = {"white": (255,255,255), "black": (0,0,0), "shadow": (90,90,90), "bg": (220,220,220), "red": (255,0,0), "green": (0,255,0), "blue": (0,0,255)}
values = {'FNN': 0.0, 'FRP': 0.0, 'SCD': 0.0, 'SCE': 0.0, 'SHI': 0.0, 'ERR': 0.0, 'ENR': 0.0, 'EPE': 0.0, 'PNS': 0.0, 'PRS': 0.0, 'PEX': 0.0, 'PRM': 0.0, 'PMN': 0.0, 'PLG': 0.0, 'SPP': 0.0, 'CSU': 0.0}
# Windows
paramScreen = Screen("Valores", w_w, w_h, fill=colours['bg'])
simScreen = Screen("Simulação", w_w, w_h, fill=colours['bg'])
resScreen = Screen("Resultados", w_w, w_h, fill=colours['bg'])
win = paramScreen.makeCurrent() # Current screen
finalResults = []
# Buttons
bStart = Button(800,700,150,50,colours['blue'],colours['blue'],None,40,colours['black'],"Start")
bReturn = Button(50,700,150,50,colours['red'],colours['red'],None,40,colours['black'],"Return")
bExportResults = Button(775,700,200,50,colours['green'],colours['green'],None,40,colours['black'],"Export as .txt")
# PARAMETER SCREEN #
# Parameters for the values screen
FNN = Inputbox("FNN",100,100,50,32)
FRP = Inputbox("FRP",100,150,50,32)
SCD = Inputbox("SCD",100,200,50,32)
SCE = Inputbox("SCE",100,250,50,32)
SHI = Inputbox("SHI",100,300,50,32)
ERR = Inputbox("ERR",100,350,50,32)
ENR = Inputbox("ENR",100,400,50,32)
EPE = Inputbox("EPE",100,450,50,32)
PNS = Inputbox("PNS",600,100,50,32)
PRS = Inputbox("PRS",600,150,50,32)
PEX = Inputbox("PEX",600,200,50,32)
PRM = Inputbox("PRM",600,250,50,32)
PMN = Inputbox("PMN",600,300,50,32)
PLG = Inputbox("PLG",600,350,50,32)
SPP = Inputbox("SPP",600,400,50,32)
CSU = Inputbox("CSU",600,450,50,32)
# Store the parameters in a list
input_boxes = [FNN, FRP, SCD, SCE, SHI, ERR, ENR, EPE, PNS, PRS, PEX, PRM, PMN, PLG, SPP, CSU]
exportValues = np.zeros(len(input_boxes)) # List with the value of each parameter to be exported
# SIMULATION SCREEN #
# Timer
clock = pg.time.Clock()
#timer = Timer(1,850,50)
pg.time.set_timer(pg.USEREVENT+1,1000)
timer_event = pg.USEREVENT+1
minsize = 15
maxsize = 100
# Simulation calculations
op = {"+": operator.add, "-": operator.sub} # Operators for the interaction types
tabela = pd.read_csv("interactions.csv", index_col=0) # Interactions table
factor = 0.5 # Multiplicative factor
dim = len(tabela.index) # Table size
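# Illustrative example (not from the original file, the cell value is hypothetical):
# if the interactions table had tabela.loc['FNN', 'FRP'] == '+', then on each tick the
# update loop below would apply
#     values['FNN'] = op['+'](values['FNN'], factor * values['FRP'])
# i.e. values['FNN'] += 0.5 * values['FRP'].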
# Elements
fnn = Bolinha('FNN',values,450,130,minsize, maxsize)
frp = Bolinha('FRP',values,560,130,minsize, maxsize)
pex = Bolinha('PEX',values,250,350,minsize, maxsize)
prm = Bolinha('PRM',values,380,350,minsize, maxsize)
pmn = Bolinha('PMN',values,510,350,minsize, maxsize)
plg = Bolinha('PLG',values,630,350,minsize, maxsize)
csu = Bolinha('CSU',values,760,350,minsize, maxsize)
scd = Bolinha('SCD',values,910,300,minsize, maxsize)
sce = Bolinha('SCE',values,910,400,minsize, maxsize)
shi = Bolinha('SHI',values,910,500,minsize, maxsize)
err = Bolinha('ERR',values,100,300,minsize, maxsize)
enr = Bolinha('ENR',values,100,400,minsize, maxsize)
epe = Bolinha('EPE',values,100,500,minsize, maxsize)
pns = Bolinha('PNS',values,250,660,minsize, maxsize)
prs = Bolinha('PRS',values,380,660,minsize, maxsize)
spp = Bolinha('SPP',values,510,520,minsize, maxsize)
# Store the elements in a list
elements = [fnn,frp,pex,prm,pmn,plg,csu,scd,sce,shi,err,enr,epe,pns,prs,spp]
# Final values
# MAIN APPLICATION LOOP
# All the logic is defined here. The window management system is also applied so that
# the user can interact with the whole system.
while not done:
mouse_pos = pg.mouse.get_pos()
mouse_click = pg.mouse.get_pressed()
paramScreen.screenUpdate()
simScreen.screenUpdate()
resScreen.screenUpdate()
# START OF THE VARIABLES SCREEN
# Variables menu screen
if paramScreen.checkUpdate():
timer = Timer(time,850,50)
for event in pg.event.get():
if event.type == pg.QUIT:
done = True
for box in input_boxes:
box.handle_event(event)
for box in input_boxes:
box.update()
bStart.showButton(paramScreen.screen)
for box in input_boxes:
box.draw(paramScreen.screen)
if bStart.focusCheck(mouse_pos,mouse_click):
for box in input_boxes:
values[box.name] = box.value
win = simScreen.makeCurrent()
paramScreen.endCurrent()
# END OF THE VARIABLES SCREEN
# START OF THE SIMULATION SCREEN
elif simScreen.checkUpdate():
clock.tick(30)
timer.draw(simScreen.screen)
bReturn.showButton(simScreen.screen)
for element in elements:
element.draw(simScreen.screen)
for event in pg.event.get():
if event.type == pg.QUIT:
done = True
if event.type == timer_event:
timer.time -= 1
# Loop to update the parameter values
for i in range (dim):
for j in range (dim):
a = tabela.index[i]
b = tabela.index[j]
operation = tabela.loc[a,b]
if pd.isnull(operation):
pass
else:
values[a] = op[operation](values[a],factor*values[b])
# for element in elements:
# element.update(values)
timer.update()
if timer.time <= 0:
timer.time = 0
win = resScreen.makeCurrent()
simScreen.endCurrent()
for element in elements:
element.update(values)
if bReturn.focusCheck(mouse_pos, mouse_click):
win = paramScreen.makeCurrent()
simScreen.endCurrent()
# END OF THE SIMULATION SCREEN
# START OF THE RESULTS SCREEN
elif resScreen.checkUpdate():
bReturn.showButton(resScreen.screen)
bExportResults.showButton(resScreen.screen)
for event in pg.event.get():
if event.type == pg.QUIT:
done = True
# Showing the values on the final screen
for box in input_boxes:
results_param = FONT.render(str(box.name), True, colours['black'])
results_value = FONT.render(str(values[box.name]), True, colours['black'])
resScreen.screen.blit(results_param,(box.rect.x,box.rect.y))
resScreen.screen.blit(results_value,(box.rect.x+90,box.rect.y))
if bExportResults.focusCheck(mouse_pos,mouse_click):
file = open("results.txt", "w+")
file.write(str(values))
file.close()
if bReturn.focusCheck(mouse_pos, mouse_click):
win = paramScreen.makeCurrent()
resScreen.endCurrent()
# END OF THE RESULTS SCREEN
pg.display.update()
pg.quit() |
py | b400c0c62fabb6f435beac8034360ba80bb07a13 | from .fund_event import FundEvent
from ...utilities.params import Events
class RebalanceEvent(FundEvent):
"""Rebalance Fund Event Class
Odin will periodically ensure that the capital investment in long and short
portfolios of a fund remains consistent with some predetermined weighting.
For many cases, this will be full dollar-neutrality, corresponding to an
equal split of equity. This event triggers rebalancing.
"""
def __init__(self, datetime):
"""Initialize parameters of the rebalance event object."""
super(RebalanceEvent, self).__init__(Events.rebalance, datetime)
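# Illustrative note (not from the original module): under full dollar-neutrality,
# a fund holding $1M of equity would target roughly $500k of gross exposure on
# each of the long and short sides, and handling a RebalanceEvent is what brings
# the two portfolios back to that predetermined weighting. The event itself only
# tags a timestamp with Events.rebalance, e.g. RebalanceEvent(some_datetime),
# where some_datetime is a hypothetical datetime.datetime value.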
|
py | b400c0f6e4473e037031d66e1624da18225c27f2 | # Copyright 2012 Red Hat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from glance.common import exception
def update_image_read_acl(req, store_api, db_api, image):
"""Helper function to set ACL permissions on images in the image store"""
location_uri = image['location']
public = image['is_public']
image_id = image['id']
if location_uri:
try:
read_tenants = []
write_tenants = []
members = db_api.image_member_find(req.context,
image_id=image_id)
for member in members:
if not member['deleted']:
if member['can_share']:
write_tenants.append(member['member'])
else:
read_tenants.append(member['member'])
store_api.set_acls(req.context, location_uri, public=public,
read_tenants=read_tenants,
write_tenants=write_tenants)
except exception.UnknownScheme:
msg = _("Store for image_id not found: %s") % image_id
raise webob.exc.HTTPBadRequest(explanation=msg,
request=req,
content_type='text/plain')
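# Illustrative example (not from the original file): for a private image whose
# members are [{'member': 'tenant-a', 'can_share': False, 'deleted': False},
#              {'member': 'tenant-b', 'can_share': True,  'deleted': False}],
# the call above would set read_tenants=['tenant-a'] and
# write_tenants=['tenant-b'] on the backing store location.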
|
py | b400c126cf732ac4199f4074aa9f11503ce4ef9a | """Utilities for kernel tests"""
from typing import Optional, Tuple
def _shape_param_to_id_str(shape_param: Optional[Tuple[Optional[int], ...]]) -> str:
"""Convert kernel input shape parameter used in `test_call.py` and `conftest.py`
into a human-readable representation, which is used in the pytest parameter id."""
if shape_param is None:
return "None"
shape_strs = tuple("indim" if dim is None else str(dim) for dim in shape_param)
if len(shape_strs) == 1:
return f"({shape_strs[0]},)"
return f"({', '.join(shape_strs)})"
|
py | b400c12ff7d47162387fb883a100920cc637f83b | from __future__ import annotations
import datetime
import logging
import pytz.reference
from blatann.event_type import EventSource, Event
from blatann.exceptions import InvalidOperationException
from blatann.event_args import DecodedReadCompleteEventArgs, DecodedWriteEventArgs
from blatann.services.decoded_event_dispatcher import DecodedReadWriteEventDispatcher
from blatann.services.current_time.constants import *
from blatann.services.current_time.data_types import *
from blatann.gatt.gatts import GattsService, GattsCharacteristicProperties
from blatann.waitables.event_waitable import IdBasedEventWaitable, EventWaitable
logger = logging.getLogger(__name__)
class CurrentTimeServer(object):
def __init__(self, service, is_writable=False,
enable_local_time_info_char=False, enable_ref_time_info_char=False):
"""
:type service: GattsService
:param is_writable:
:param enable_local_time_info_char:
:param enable_ref_time_info_char:
"""
self._service = service
self._is_writable = is_writable
self._has_local_time_info = enable_local_time_info_char
self._has_ref_time_info = enable_ref_time_info_char
self._current_time_read_callback = self._on_characteristic_read_auto
self._time_delta = datetime.timedelta()
self._on_current_time_write_event = EventSource("Current Time Write Event")
self._on_local_time_info_write_event = EventSource("Local Time Info Write Event")
self._current_time_dispatcher = DecodedReadWriteEventDispatcher(self, CurrentTime,
self._on_current_time_write_event, logger)
self._local_time_dispatcher = DecodedReadWriteEventDispatcher(self, LocalTimeInfo,
self._on_local_time_info_write_event, logger)
cur_time_char_props = GattsCharacteristicProperties(read=True, notify=True, write=is_writable,
variable_length=False, max_length=CurrentTime.encoded_size())
self._cur_time_char = service.add_characteristic(CURRENT_TIME_CHARACTERISTIC_UUID, cur_time_char_props)
self._cur_time_char.on_read.register(self._on_current_time_read)
self._cur_time_char.on_write.register(self._current_time_dispatcher)
if enable_local_time_info_char:
local_time_props = GattsCharacteristicProperties(read=True, notify=True, write=is_writable,
variable_length=False, max_length=LocalTimeInfo.encoded_size())
self._local_time_char = service.add_characteristic(LOCAL_TIME_INFO_CHARACTERISTIC_UUID, local_time_props)
self.set_local_time_info()
self._local_time_char.on_write.register(self._local_time_dispatcher)
if enable_ref_time_info_char:
ref_time_props = GattsCharacteristicProperties(read=True, notify=False, write=False,
variable_length=False, max_length=ReferenceTimeInfo.encoded_size())
self._ref_time_char = service.add_characteristic(REFERENCE_INFO_CHARACTERISTIC_UUID, ref_time_props)
self.set_reference_info()
self.set_time(datetime.datetime.utcfromtimestamp(0))
def _on_characteristic_read_auto(self):
return datetime.datetime.now() + self._time_delta
def _on_current_time_read(self, char, event_args):
dt = CurrentTime(self._current_time_read_callback())
self._cur_time_char.set_value(dt.encode())
def _on_current_time_write(self, char, event_args):
logger.info(event_args)
def _on_local_time_write(self, char, event_args):
logger.info(event_args)
@property
def is_writable(self) -> bool:
"""
Gets whether or not the service was configured to allow writes to the Current Time and Local Time Info
characteristics
"""
return self._is_writable
@property
def has_local_time_info(self) -> bool:
"""
Gets whether or not the service was configured to show the Local Time Info characteristic
"""
return self._has_local_time_info
@property
def has_reference_time_info(self) -> bool:
"""
Gets whether or not the service was configured to show the Reference Time Info characteristic
"""
return self._has_ref_time_info
@property
def on_current_time_write(self) -> Event[CurrentTimeServer, DecodedWriteEventArgs[CurrentTime]]:
"""
Event that is triggered when a client writes to the Current Time Characteristic.
Event emits a DecodedWriteEventArgs argument where the value is of type current_time.CurrentTime
"""
return self._on_current_time_write_event
@property
def on_local_time_info_write(self) -> Event[CurrentTimeServer, DecodedWriteEventArgs[LocalTimeInfo]]:
"""
Event that is triggered when a client writes to the Local Time Info Characteristic (if present).
Event emits a DecodedWriteEventArgs argument where the value is of type current_time.LocalTimeInfo
"""
return self._on_local_time_info_write_event
def configure_automatic(self):
"""
Configures the current time and local time info (if present) to use the system time
"""
now = datetime.datetime.now()
adj_reason = AdjustmentReason(AdjustmentReasonType.manual_time_update,
AdjustmentReasonType.dst_change,
AdjustmentReasonType.time_zone_change)
self.set_time(adjustment_reason=adj_reason)
if self.has_local_time_info:
local_timezone = pytz.reference.LocalTimezone()
offset = local_timezone.utcoffset(now).total_seconds()
dst = local_timezone.dst(now).total_seconds()
# Get the actual offset by subtracting the DST
offset -= dst
dst_enum = DaylightSavingsTimeOffset.from_seconds(dst)
self.set_local_time_info(offset/3600.0, dst_enum)
def set_time(self, date=None, adjustment_reason=None, characteristic_read_callback=None):
"""
Manually sets the time to report to the client.
If characteristic_read_callback is supplied,
the function is called for future reads on that characteristic to fetch the current time
If characteristic_read_callback is None,
future reads will be based off of the base datetime given and the time passed
:param date: The new base date to set the characteristic to. Future characteristic reads will base its time
off of this value if characteristic_read_callback is not supplied.
If the date is not supplied, will use the current system time
(same as configure_automatic but doesn't configure local time info)
:type date: datetime.datetime
:param adjustment_reason: Optional reason to give for the adjustment
:type adjustment_reason: AdjustmentReason
:param characteristic_read_callback: Optional callback to fetch subsequent time values.
Function signature should take no parameters and return a datetime object
"""
if date is None:
date = datetime.datetime.now()
self._time_delta = datetime.timedelta()
else:
delta = date - datetime.datetime.now()
if abs(delta.total_seconds()) < 1:
delta = datetime.timedelta()
self._time_delta = delta
if characteristic_read_callback:
self._current_time_read_callback = characteristic_read_callback
else:
self._current_time_read_callback = self._on_characteristic_read_auto
dt = CurrentTime(date, adjustment_reason)
self._cur_time_char.set_value(dt.encode(), True)
def set_local_time_info(self, timezone_hrs=0.0, dst_offset=DaylightSavingsTimeOffset.standard_time):
"""
Sets the local time info characteristic data. Only valid if has_local_time_info is True
:param timezone_hrs: The timezone to report, in hours
:param dst_offset: The daylight savings time offset
:type dst_offset: DaylightSavingsTimeOffset
:raises: InvalidOperationException if the service was not configured with the local time info
"""
if not self.has_local_time_info:
raise InvalidOperationException("Current Time service was not initialized with local time info")
lt = LocalTimeInfo(timezone_hrs, dst_offset)
self._local_time_char.set_value(lt.encode(), True)
def set_reference_info(self, time_source=TimeSource.unknown, accuracy=TimeAccuracy.unknown,
hours_since_update=None):
"""
Sets the time reference info characteristic data. Only valid if has_reference_time_info is True
:param time_source: The time source to use
:type time_source: TimeSource
:param accuracy: The accuracy to report
:type accuracy: TimeAccuracy
:param hours_since_update: The number of hours since time reference has been updated
:raises: InvalidOperationException if the service was not configured with the reference info
"""
if not self.has_reference_time_info:
raise InvalidOperationException("Current Time service was not initialized with reference info")
ri = ReferenceTimeInfo(time_source, accuracy, hours_since_update)
self._ref_time_char.set_value(ri.encode(), False)
@classmethod
def add_to_database(cls, gatts_database, is_writable=False,
enable_local_time_info_char=False, enable_ref_time_info_char=False):
service = gatts_database.add_service(CURRENT_TIME_SERVICE_UUID)
return cls(service, is_writable, enable_local_time_info_char, enable_ref_time_info_char)
class CurrentTimeClient(object):
def __init__(self, gattc_service):
"""
:type gattc_service: blatann.gatt.gattc.GattcService
"""
self._service = gattc_service
self._current_time_char = gattc_service.find_characteristic(CURRENT_TIME_CHARACTERISTIC_UUID)
self._local_time_info_char = gattc_service.find_characteristic(LOCAL_TIME_INFO_CHARACTERISTIC_UUID)
self._ref_info_char = gattc_service.find_characteristic(REFERENCE_INFO_CHARACTERISTIC_UUID)
self._on_current_time_updated_event = EventSource("Current Time Update Event")
self._on_local_time_info_updated_event = EventSource("Local Time Info Update Event")
self._on_reference_info_updated_event = EventSource("Reference Info Update Event")
self._current_time_dispatcher = DecodedReadWriteEventDispatcher(self, CurrentTime,
self._on_current_time_updated_event, logger)
self._local_time_dispatcher = DecodedReadWriteEventDispatcher(self, LocalTimeInfo,
self._on_local_time_info_updated_event, logger)
self._ref_time_dispatcher = DecodedReadWriteEventDispatcher(self, ReferenceTimeInfo,
self._on_reference_info_updated_event, logger)
@property
def on_current_time_updated(self) -> Event[CurrentTimeClient, DecodedReadCompleteEventArgs[CurrentTime]]:
"""
Event triggered when the server has updated its current time
"""
return self._on_current_time_updated_event
@property
def on_local_time_info_updated(self) -> Event[CurrentTimeClient, DecodedReadCompleteEventArgs[LocalTimeInfo]]:
"""
Event triggered when the server has updated its local time info
"""
return self._on_local_time_info_updated_event
@property
def on_reference_info_updated(self) -> Event[CurrentTimeClient, DecodedReadCompleteEventArgs[ReferenceTimeInfo]]:
"""
Event triggered when the server has updated its reference time info
"""
return self._on_reference_info_updated_event
@property
def has_local_time_info(self) -> bool:
return self._local_time_info_char is not None
@property
def has_reference_info(self) -> bool:
return self._ref_info_char is not None
@property
def can_enable_notifications(self) -> bool:
return self._current_time_char.subscribable
@property
def can_set_current_time(self) -> bool:
return self._current_time_char.writable
@property
def can_set_local_time_info(self) -> bool:
if not self.has_local_time_info:
return False
return self._local_time_info_char.writable
def read_time(self) -> EventWaitable[CurrentTimeClient, DecodedReadCompleteEventArgs[CurrentTime]]:
"""
Reads the time from the server
"""
self._current_time_char.read().then(self._current_time_dispatcher)
return EventWaitable(self._on_current_time_updated_event)
def set_time(self, date, adjustment_reason=None):
"""
Sets the time on the server to the datetime provided
:type date: datetime.datetime
:type adjustment_reason: AdjustmentReason
"""
dt = CurrentTime(date, adjustment_reason)
return self._current_time_char.write(dt.encode())
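# Hedged usage sketch (not part of the original module): on a peripheral, the
# server side of the service is typically added and driven like the commented
# lines below, where `db` stands for a blatann GATT server database obtained
# elsewhere.
#
#   svc = CurrentTimeServer.add_to_database(db, is_writable=True,
#                                           enable_local_time_info_char=True)
#   svc.configure_automatic()  # report system time and local timezone info
#   svc.set_time(datetime.datetime(2020, 1, 1, 12, 0, 0))  # or pin a base time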
|
py | b400c133f967865e426137e6e4e372311a872449 | #!/usr/bin/env python3
import glob
import os
import pprint
import traceback
import click
import pandas as pd
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
from tensorflow import make_ndarray
# Extraction function
def tflog2pandas(path: str) -> pd.DataFrame:
"""convert single tensorflow log file to pandas DataFrame
Parameters
----------
path : str
path to tensorflow log file
Returns
-------
pd.DataFrame
converted dataframe
"""
DEFAULT_SIZE_GUIDANCE = {
"compressedHistograms": 1,
"images": 1,
"scalars": 0, # 0 means load all
"tensors": 0, # 0 means load all
"histograms": 1,
}
runlog_data = pd.DataFrame({"metric": [], "value": [], "step": []})
try:
event_acc = EventAccumulator(path, DEFAULT_SIZE_GUIDANCE)
event_acc.Reload()
# tags = event_acc.Tags()["scalars"]
tags = event_acc.Tags()["tensors"]
tags = [t for t in tags if "DetectionBoxes" in t or "Loss" in t]
for tag in tags:
# event_list = event_acc.Scalars(tag)
event_list = event_acc.Tensors(tag)
values = list(map(lambda x: make_ndarray(x.tensor_proto), event_list))
step = list(map(lambda x: x.step, event_list))
r = {"metric": [tag] * len(step), "value": values, "step": step}
r = pd.DataFrame(r)
runlog_data = pd.concat([runlog_data, r])
# Dirty catch of DataLossError
except Exception:
print("Event file possibly corrupt: {}".format(path))
traceback.print_exc()
return runlog_data
def many_logs2pandas(event_paths):
all_logs = pd.DataFrame()
for path in event_paths:
log = tflog2pandas(path)
if log is not None:
if all_logs.shape[0] == 0:
all_logs = log
else:
all_logs = all_logs.append(log, ignore_index=True)
all_logs = all_logs.pivot_table(index = "step", columns = "metric", values = "value", aggfunc = "max").sort_index()
return all_logs
@click.command()
@click.argument("logdir-or-logfile")
@click.option(
"--write-pkl/--no-write-pkl", help="save to pickle file or not", default=False
)
@click.option(
"--write-csv/--no-write-csv", help="save to csv file or not", default=True
)
@click.option("--out-dir", "-o", help="output directory", default=".")
@click.option("--out-filename", "-f", help="output filename (whithout extension)", default="all_training_logs_in_one_file")
def main(logdir_or_logfile: str, write_pkl: bool, write_csv: bool, out_dir: str, out_filename: str):
# def main(logdir_or_logfile: str, write_pkl: bool = False, write_csv: bool = True, out_dir: str = ".", out_filename: str = "all_training_logs_in_one_file"):
"""This is a enhanced version of
https://gist.github.com/ptschandl/ef67bbaa93ec67aba2cab0a7af47700b
This script exctracts variables from all logs from tensorflow event
files ("event*"),
writes them to Pandas and finally stores them a csv-file or
pickle-file including all (readable) runs of the logging directory.
Example usage:
# create csv file from all tensorflow logs in provided directory (.)
# and write it to folder "./converted"
tflogs2pandas.py . --write-csv --no-write-pkl --o converted
# creaste csv file from tensorflow logfile only and write into
# and write it to folder "./converted"
tflogs2pandas.py tflog.hostname.12345 --write-csv --no-write-pkl --o converted
"""
pp = pprint.PrettyPrinter(indent=4)
if os.path.isdir(logdir_or_logfile):
# Get all event* runs from logging_dir subdirectories
event_paths = glob.glob(os.path.join(logdir_or_logfile, "event*"))
elif os.path.isfile(logdir_or_logfile):
event_paths = [logdir_or_logfile]
else:
raise ValueError(
"input argument {} has to be a file or a directory".format(
logdir_or_logfile
)
)
# Call & append
if event_paths:
pp.pprint(f"Found {len(event_paths)} tensorflow logs to process:")
# pp.pprint(event_paths)
all_logs = many_logs2pandas(event_paths)
pp.pprint("Head of created dataframe")
pp.pprint(all_logs.head())
os.makedirs(out_dir, exist_ok=True)
if write_csv:
print("saving to csv file")
out_file = os.path.join(out_dir, f"{out_filename}.csv")
print(out_file)
all_logs.to_csv(out_file, index=None)
if write_pkl:
print("saving to pickle file")
out_file = os.path.join(out_dir, f"{out_filename}.pkl")
print(out_file)
all_logs.to_pickle(out_file)
else:
print("No event paths have been found.")
if __name__ == "__main__":
main()
# main("./data/tfevents") |
py | b400c3b498b63c29ce34c0ebc7571c2384c35cff | from functools import partial
import theano
import theano.tensor as T
import numpy as np
from neupy import algorithms
from neupy.algorithms.gd.hessian import find_hessian_and_gradient
from utils import compare_networks
from data import simple_classification
from base import BaseTestCase
class HessianTestCase(BaseTestCase):
# In case of Hessian this solution will give
# significant improvement.
use_sandbox_mode = False
def test_hessian_exceptions(self):
with self.assertRaises(ValueError):
# Don't have step parameter
algorithms.Hessian((2, 3, 1), step=1)
def test_compare_bp_and_hessian(self):
x_train, x_test, y_train, y_test = simple_classification()
compare_networks(
# Test classes
algorithms.GradientDescent,
partial(algorithms.Hessian, penalty_const=1),
# Test data
(x_train, y_train, x_test, y_test),
# Network configurations
connection=(10, 15, 1),
shuffle_data=True,
verbose=False,
show_epoch=1,
# Test configurations
epochs=5,
show_comparison_plot=False
)
def test_hessian_computation(self):
x = T.scalar('x')
y = T.scalar('y')
f = x ** 2 + y ** 3 + 7 * x * y
# Gradient function:
# [2 * x + 7 * y,
# 3 * y ** 2 + 7 * x]
# Hessian function:
# [[2, 7 ]
# [7, 6 * y]]
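# Worked values at the test point (x, y) = (1, 2), added for clarity:
# gradient = [2*1 + 7*2, 3*2**2 + 7*1] = [16, 19]
# hessian = [[2, 7], [7, 6*2]] = [[2, 7], [7, 12]]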
hessian, gradient = find_hessian_and_gradient(f, [x, y])
func = theano.function([x, y], [hessian, gradient])
hessian_output, gradient_output = func(1, 2)
np.testing.assert_array_equal(
gradient_output,
np.array([16, 19])
)
np.testing.assert_array_equal(
hessian_output,
np.array([
[2, 7],
[7, 12],
])
)
def test_hessian_assign_step_exception(self):
with self.assertRaises(ValueError):
algorithms.Hessian((2, 3, 1), step=0.01)
|
py | b400c60dd267c40591063881e3f76299f2c8f7d3 | ###############################################################################
# YOU CAN MODIFY THIS FILE, BUT CHANGES WILL NOT APPLY DURING GRADING #
###############################################################################
import logging
import pickle
import random
logger = logging.getLogger(__name__)
class BasePlayer:
def __init__(self, player_id):
self.player_id = player_id
self.timer = None
self.queue = None
self.context = None
self.data = None
def get_action(self, state):
""" Implement a function that calls self.queue.put(ACTION) within the allowed time limit
See RandomPlayer and GreedyPlayer for examples.
"""
raise NotImplementedError
class DataPlayer(BasePlayer):
def __init__(self, player_id):
super().__init__(player_id)
try:
with open("data.pickle", "rb") as f:
self.data = pickle.load(f)
except (IOError, TypeError) as e:
logger.error(str(e))
self.data = None
class RandomPlayer(BasePlayer):
def get_action(self, state):
""" Randomly select a move from the available legal moves.
Parameters
----------
state : `isolation.Isolation`
An instance of `isolation.Isolation` encoding the current state of the
game (e.g., player locations and blocked cells)
"""
self.queue.put(random.choice(state.actions()))
class GreedyPlayer(BasePlayer):
""" Player that chooses next move to maximize heuristic score. This is
equivalent to a minimax search agent with a search depth of one.
"""
def score(self, state):
own_loc = state.locs[self.player_id]
own_liberties = state.liberties(own_loc)
return len(own_liberties)
def get_action(self, state):
"""Select the move from the available legal moves with the highest
heuristic score.
Parameters
----------
state : `isolation.Isolation`
An instance of `isolation.Isolation` encoding the current state of the
game (e.g., player locations and blocked cells)
"""
self.queue.put(max(state.actions(), key=lambda x: self.score(state.result(x))))
class MinimaxPlayer(BasePlayer):
""" Implement an agent using any combination of techniques discussed
in lecture (or that you find online on your own) that can beat
sample_players.GreedyPlayer in >80% of "fair" matches (see tournament.py
or readme for definition of fair matches).
Implementing get_action() is the only required method, but you can add any
other methods you want to perform minimax/alpha-beta/monte-carlo tree search,
etc.
**********************************************************************
NOTE: The test cases will NOT be run on a machine with GPU access, or
be suitable for using any other machine learning techniques.
**********************************************************************
"""
def get_action(self, state):
""" Choose an action available in the current state
See RandomPlayer and GreedyPlayer for examples.
This method must call self.queue.put(ACTION) at least once, and may
call it as many times as you want; the caller is responsible for
cutting off the function after the search time limit has expired.
**********************************************************************
NOTE: since the caller is responsible for cutting off search, calling
get_action() from your own code will create an infinite loop!
See (and use!) the Isolation.play() function to run games.
**********************************************************************
"""
# randomly select a move as player 1 or 2 on an empty board, otherwise
# return the optimal minimax move at a fixed search depth of 3 plies
if state.ply_count < 2:
self.queue.put(random.choice(state.actions()))
else:
self.queue.put(self.minimax(state, depth=3))
def minimax(self, state, depth):
def min_value(state, depth):
if state.terminal_test(): return state.utility(self.player_id)
if depth <= 0: return self.score(state)
value = float("inf")
for action in state.actions():
value = min(value, max_value(state.result(action), depth - 1))
return value
def max_value(state, depth):
if state.terminal_test(): return state.utility(self.player_id)
if depth <= 0: return self.score(state)
value = float("-inf")
for action in state.actions():
value = max(value, min_value(state.result(action), depth - 1))
return value
return max(state.actions(), key=lambda x: min_value(state.result(x), depth - 1))
def score(self, state):
own_loc = state.locs[self.player_id]
opp_loc = state.locs[1 - self.player_id]
own_liberties = state.liberties(own_loc)
opp_liberties = state.liberties(opp_loc)
return len(own_liberties) - len(opp_liberties)
|
py | b400c63a332483e20017e7f086cd1b7b903824d4 | import contextlib
import unittest
import tempfile
from io import StringIO
import numpy as np
from tests.utils import create_dummy_data, preprocess_lm_data, train_language_model
try:
from pyarrow import plasma
from fairseq.data.plasma_utils import PlasmaView, PlasmaStore
PYARROW_AVAILABLE = True
except ImportError:
PYARROW_AVAILABLE = False
dummy_path = "dummy"
@unittest.skipUnless(PYARROW_AVAILABLE, "")
class TestPlasmaView(unittest.TestCase):
def setUp(self) -> None:
self.tmp_file = tempfile.NamedTemporaryFile() # noqa: P201
self.path = self.tmp_file.name
self.server = PlasmaStore.start(path=self.path, nbytes=10000)
self.client = plasma.connect(self.path, num_retries=10)
def tearDown(self) -> None:
self.client.disconnect()
self.tmp_file.close()
self.server.kill()
def test_two_servers_do_not_share_object_id_space(self):
data_server_1 = np.array([0, 1])
data_server_2 = np.array([2, 3])
server_2_path = self.path
with tempfile.NamedTemporaryFile() as server_1_path:
server = PlasmaStore.start(path=server_1_path.name, nbytes=10000)
arr1 = PlasmaView(
data_server_1, dummy_path, 1, plasma_path=server_1_path.name
)
assert len(arr1.client.list()) == 1
assert (arr1.array == data_server_1).all()
arr2 = PlasmaView(data_server_2, dummy_path, 1, plasma_path=server_2_path)
assert (arr2.array == data_server_2).all()
assert (arr1.array == data_server_1).all()
server.kill()
def test_hash_collision(self):
data_server_1 = np.array([0, 1])
data_server_2 = np.array([2, 3])
arr1 = PlasmaView(data_server_1, dummy_path, 1, plasma_path=self.path)
assert len(arr1.client.list()) == 1
arr2 = PlasmaView(data_server_2, dummy_path, 1, plasma_path=self.path)
assert len(arr1.client.list()) == 1
assert len(arr2.client.list()) == 1
assert (arr2.array == data_server_1).all()
# New hash key based on tuples
arr3 = PlasmaView(
data_server_2, dummy_path, (1, 12312312312, None), plasma_path=self.path
)
assert (
len(arr2.client.list()) == 2
), "No new object was created by using a novel hash key"
assert (
arr3.object_id in arr2.client.list()
), "No new object was created by using a novel hash key"
assert (
arr3.object_id in arr3.client.list()
), "No new object was created by using a novel hash key"
del arr3, arr2, arr1
@staticmethod
def _assert_view_equal(pv1, pv2):
np.testing.assert_array_equal(pv1.array, pv2.array)
def test_putting_same_array_twice(self):
data = np.array([4, 4, 4])
arr1 = PlasmaView(data, dummy_path, 1, plasma_path=self.path)
assert len(self.client.list()) == 1
arr1b = PlasmaView(
data, dummy_path, 1, plasma_path=self.path
) # should not change contents of store
arr1c = PlasmaView(
None, dummy_path, 1, plasma_path=self.path
) # should not change contents of store
assert len(self.client.list()) == 1
self._assert_view_equal(arr1, arr1b)
self._assert_view_equal(arr1, arr1c)
PlasmaView(
data, dummy_path, 2, plasma_path=self.path
) # new object id, adds new entry
assert len(self.client.list()) == 2
new_client = plasma.connect(self.path)
assert len(new_client.list()) == 2 # new client can access same objects
assert isinstance(arr1.object_id, plasma.ObjectID)
del arr1b
del arr1c
def test_plasma_store_full_raises(self):
with tempfile.NamedTemporaryFile() as new_path:
server = PlasmaStore.start(path=new_path.name, nbytes=10000)
with self.assertRaises(plasma.PlasmaStoreFull):
# 2000 floats is more than 2000 bytes
PlasmaView(
np.random.rand(10000, 1), dummy_path, 1, plasma_path=new_path.name
)
server.kill()
def test_object_id_overflow(self):
PlasmaView.get_object_id("", 2 ** 21)
def test_training_lm_plasma(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_transformer_lm") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_language_model(
data_dir,
"transformer_lm",
["--use-plasma-view", "--plasma-path", self.path],
run_validation=True,
)
|
py | b400c6913d8e4b40d019474e26e564f0c1df03e4 | # Problem: https://www.hackerrank.com/challenges/symmetric-difference/problem
# Score: 10.0
m = int(input())
set_m = set(input().split())
n = int(input())
set_n = set(input().split())
print('\n'.join(sorted(set_m ^ set_n, key = int)))
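# Worked example (not part of the submission): with set_m = {'1', '2', '3'} and
# set_n = {'3', '4'}, set_m ^ set_n == {'1', '2', '4'}, so sorting by int prints
# 1, 2 and 4 on separate lines.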
|
py | b400c6daa6d066e7a41ea0f8b1c19dc70bcf8c9a | """
Test using LLDB data formatters with frozen objects coming from the expression parser.
"""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class ExprFormattersTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break for main.cpp.
self.line = line_number('main.cpp',
'// Stop here')
@expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr21765")
@skipIfTargetAndroid() # skipping to avoid crashing the test runner
@expectedFailureAndroid('llvm.org/pr24691') # we hit an assertion in clang
def test(self):
"""Test expr + formatters for good interoperability."""
self.build()
# This is the function to remove the custom formats in order to have a
# clean slate for the next test case.
def cleanup():
self.runCmd('type summary clear', check=False)
self.runCmd('type synthetic clear', check=False)
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
"""Test expr + formatters for good interoperability."""
self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)
lldbutil.run_break_set_by_file_and_line(
self, "main.cpp", self.line, loc_exact=True)
self.runCmd("run", RUN_SUCCEEDED)
self.runCmd("command script import formatters.py")
self.runCmd("command script import foosynth.py")
if self.TraceOn():
self.runCmd("frame variable foo1 --show-types")
self.runCmd("frame variable foo1.b --show-types")
self.runCmd("frame variable foo1.b.b_ref --show-types")
self.filecheck("expression --show-types -- *(new_foo(47))", __file__,
'-check-prefix=EXPR-TYPES-NEW-FOO')
# EXPR-TYPES-NEW-FOO: (foo) ${{.*}} = {
# EXPR-TYPES-NEW-FOO-NEXT: (int) a = 47
# EXPR-TYPES-NEW-FOO-NEXT: (int *) a_ptr = 0x
# EXPR-TYPES-NEW-FOO-NEXT: (bar) b = {
# EXPR-TYPES-NEW-FOO-NEXT: (int) i = 94
# EXPR-TYPES-NEW-FOO-NEXT: (int *) i_ptr = 0x
# EXPR-TYPES-NEW-FOO-NEXT: (baz) b = {
# EXPR-TYPES-NEW-FOO-NEXT: (int) h = 97
# EXPR-TYPES-NEW-FOO-NEXT: (int) k = 99
# EXPR-TYPES-NEW-FOO-NEXT: }
# EXPR-TYPES-NEW-FOO-NEXT: (baz &) b_ref = 0x
# EXPR-TYPES-NEW-FOO-NEXT: }
# EXPR-TYPES-NEW-FOO-NEXT: }
self.runCmd("type summary add -F formatters.foo_SummaryProvider3 foo")
self.filecheck("expression foo1", __file__, '-check-prefix=EXPR-FOO1opts')
# EXPR-FOO1opts: (foo) $
# EXPR-FOO1opts-SAME: a = 12
# EXPR-FOO1opts-SAME: a_ptr = {{[0-9]+}} -> 13
# EXPR-FOO1opts-SAME: i = 24
# EXPR-FOO1opts-SAME: i_ptr = {{[0-9]+}} -> 25
# EXPR-FOO1opts-SAME: b_ref = {{[0-9]+}}
# EXPR-FOO1opts-SAME: h = 27
# EXPR-FOO1opts-SAME: k = 29
# EXPR-FOO1opts-SAME: WITH_OPTS
self.runCmd("type summary delete foo")
self.runCmd("type summary add -F formatters.foo_SummaryProvider foo")
self.expect("expression new_int(12)",
substrs=['(int *) $', ' = 0x'])
self.runCmd(
"type summary add -s \"${var%pointer} -> ${*var%decimal}\" \"int *\"")
self.expect("expression new_int(12)",
substrs=['(int *) $', '= 0x', ' -> 12'])
self.expect("expression foo1.a_ptr",
substrs=['(int *) $', '= 0x', ' -> 13'])
self.filecheck("expression foo1", __file__, '-check-prefix=EXPR-FOO1')
# EXPR-FOO1: (foo) $
# EXPR-FOO1-SAME: a = 12
# EXPR-FOO1-SAME: a_ptr = {{[0-9]+}} -> 13
# EXPR-FOO1-SAME: i = 24
# EXPR-FOO1-SAME: i_ptr = {{[0-9]+}} -> 25
# EXPR-FOO1-SAME: b_ref = {{[0-9]+}}
# EXPR-FOO1-SAME: h = 27
# EXPR-FOO1-SAME: k = 29
self.filecheck("expression --ptr-depth=1 -- new_foo(47)", __file__,
'-check-prefix=EXPR-PTR-DEPTH1')
# EXPR-PTR-DEPTH1: (foo *) $
# EXPR-PTR-DEPTH1-SAME: a = 47
# EXPR-PTR-DEPTH1-SAME: a_ptr = {{[0-9]+}} -> 48
# EXPR-PTR-DEPTH1-SAME: i = 94
# EXPR-PTR-DEPTH1-SAME: i_ptr = {{[0-9]+}} -> 95
self.filecheck("expression foo2", __file__, '-check-prefix=EXPR-FOO2')
# EXPR-FOO2: (foo) $
# EXPR-FOO2-SAME: a = 121
# EXPR-FOO2-SAME: a_ptr = {{[0-9]+}} -> 122
# EXPR-FOO2-SAME: i = 242
# EXPR-FOO2-SAME: i_ptr = {{[0-9]+}} -> 243
# EXPR-FOO2-SAME: h = 245
# EXPR-FOO2-SAME: k = 247
object_name = self.res.GetOutput()
object_name = object_name[7:]
object_name = object_name[0:object_name.find(' =')]
self.filecheck("frame variable foo2", __file__, '-check-prefix=VAR-FOO2')
# VAR-FOO2: (foo) foo2
# VAR-FOO2-SAME: a = 121
# VAR-FOO2-SAME: a_ptr = {{[0-9]+}} -> 122
# VAR-FOO2-SAME: i = 242
# VAR-FOO2-SAME: i_ptr = {{[0-9]+}} -> 243
# VAR-FOO2-SAME: h = 245
# VAR-FOO2-SAME: k = 247
# The object is the same as foo2, so use the EXPR-FOO2 checks.
self.filecheck("expression $" + object_name, __file__,
'-check-prefix=EXPR-FOO2')
self.runCmd("type summary delete foo")
self.runCmd(
"type synthetic add --python-class foosynth.FooSyntheticProvider foo")
self.expect("expression --show-types -- $" + object_name,
substrs=['(foo) $', ' = {', '(int) *i_ptr = 243'])
self.runCmd("n")
self.runCmd("n")
self.runCmd("type synthetic delete foo")
self.runCmd("type summary add -F formatters.foo_SummaryProvider foo")
self.expect(
"expression foo2",
substrs=[
'(foo) $',
'a = 7777',
'a_ptr = ',
' -> 122',
'i = 242',
'i_ptr = ',
' -> 8888'])
self.expect("expression $" + object_name + '.a',
substrs=['7777'])
self.expect("expression *$" + object_name + '.b.i_ptr',
substrs=['8888'])
self.expect(
"expression $" +
object_name,
substrs=[
'(foo) $',
'a = 121',
'a_ptr = ',
' -> 122',
'i = 242',
'i_ptr = ',
' -> 8888',
'h = 245',
'k = 247'])
self.runCmd("type summary delete foo")
self.runCmd(
"type synthetic add --python-class foosynth.FooSyntheticProvider foo")
self.expect("expression --show-types -- $" + object_name,
substrs=['(foo) $', ' = {', '(int) *i_ptr = 8888'])
self.runCmd("n")
self.runCmd("type synthetic delete foo")
self.runCmd("type summary add -F formatters.foo_SummaryProvider foo")
self.expect(
"expression $" +
object_name,
substrs=[
'(foo) $',
'a = 121',
'a_ptr = ',
' -> 122',
'i = 242',
'i_ptr = ',
' -> 8888',
'k = 247'])
process = self.dbg.GetSelectedTarget().GetProcess()
thread = process.GetThreadAtIndex(0)
frame = thread.GetSelectedFrame()
frozen = frame.EvaluateExpression("$" + object_name + ".a_ptr")
a_data = frozen.GetPointeeData()
error = lldb.SBError()
self.assertTrue(
a_data.GetUnsignedInt32(
error,
0) == 122,
'*a_ptr = 122')
ret = line_number("main.cpp", "Done initializing")
self.runCmd("thread until " + str(ret))
self.expect("frame variable numbers",
substrs=['1', '2', '3', '4', '5'])
self.expect("expression numbers",
substrs=['1', '2', '3', '4', '5'])
frozen = frame.EvaluateExpression("&numbers")
a_data = frozen.GetPointeeData(0, 1)
self.assertTrue(
a_data.GetUnsignedInt32(
error,
0) == 1,
'numbers[0] == 1')
self.assertTrue(
a_data.GetUnsignedInt32(
error,
4) == 2,
'numbers[1] == 2')
self.assertTrue(
a_data.GetUnsignedInt32(
error,
8) == 3,
'numbers[2] == 3')
self.assertTrue(
a_data.GetUnsignedInt32(
error,
12) == 4,
'numbers[3] == 4')
self.assertTrue(
a_data.GetUnsignedInt32(
error,
16) == 5,
'numbers[4] == 5')
frozen = frame.EvaluateExpression("numbers")
a_data = frozen.GetData()
self.assertTrue(
a_data.GetUnsignedInt32(
error,
0) == 1,
'numbers[0] == 1')
self.assertTrue(
a_data.GetUnsignedInt32(
error,
4) == 2,
'numbers[1] == 2')
self.assertTrue(
a_data.GetUnsignedInt32(
error,
8) == 3,
'numbers[2] == 3')
self.assertTrue(
a_data.GetUnsignedInt32(
error,
12) == 4,
'numbers[3] == 4')
self.assertTrue(
a_data.GetUnsignedInt32(
error,
16) == 5,
'numbers[4] == 5')
|
py | b400c7c78a86857ea26b4f1144a0f0f4932873fc | import torch
import torch.nn as nn
import numpy as np
from model import dif_modules
from model.hyper_net import HyperNetwork
from utils import rend_util
from model.embedder import *
from model.ray_tracing import RayTracing
from model.sample_network import SampleNetwork
def add_latent(x, latent_code):
code_const = torch.broadcast_to(latent_code, (x.shape[0], latent_code.shape[1]))
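    # Shape illustration: if x is (N, d) and latent_code is (1, L), broadcast_to tiles the
    # code to (N, L), so the concatenation below returns an (N, d + L) tensor.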
return torch.cat([x, code_const], dim=1)
class ImplicitNetwork(nn.Module):
def __init__(
self,
feature_vector_size,
deform_net,
latent_code_size,
d_in,
d_out,
dims,
geometric_init=True,
bias=1.0,
skip_in=(),
weight_norm=True,
multires=0,
):
super().__init__()
self.deform_net = deform_net
dims = [d_in] + dims + [d_out + feature_vector_size]
self.embed_fn = None
if multires > 0:
embed_fn, input_ch = get_embedder(multires)
self.embed_fn = embed_fn
dims[0] = input_ch
self.num_layers = len(dims)
self.skip_in = skip_in
for l in range(0, self.num_layers - 1):
if l + 1 in self.skip_in:
out_dim = dims[l + 1] - dims[0]
else:
out_dim = dims[l + 1]
lin = nn.Linear(dims[l], out_dim)
if geometric_init:
if l == self.num_layers - 2:
torch.nn.init.normal_(lin.weight, mean=np.sqrt(np.pi) / np.sqrt(dims[l]), std=0.0001)
torch.nn.init.constant_(lin.bias, -bias)
elif multires > 0 and l == 0:
torch.nn.init.constant_(lin.bias, 0.0)
torch.nn.init.constant_(lin.weight[:, 3:], 0.0)
torch.nn.init.normal_(lin.weight[:, :3], 0.0, np.sqrt(2) / np.sqrt(out_dim))
elif multires > 0 and l in self.skip_in:
torch.nn.init.constant_(lin.bias, 0.0)
torch.nn.init.normal_(lin.weight, 0.0, np.sqrt(2) / np.sqrt(out_dim))
torch.nn.init.constant_(lin.weight[:, -(dims[0] - 3):], 0.0)
else:
torch.nn.init.constant_(lin.bias, 0.0)
torch.nn.init.normal_(lin.weight, 0.0, np.sqrt(2) / np.sqrt(out_dim))
if weight_norm:
lin = nn.utils.weight_norm(lin)
setattr(self, "lin" + str(l), lin)
self.softplus = nn.Softplus(beta=100)
def forward(self, x_in, hypo_params, latent_code, deform, ret_deforms=False):
assert hypo_params is not None
deformation = torch.zeros(x_in.shape[0], 3).cuda()
scalar_correction = torch.zeros(x_in.shape[0], 1).cuda()
x_deform = x_in
if deform:
adj_x = self.deform_net(x_in, params=hypo_params)["model_out"]
deformation = adj_x[0, :, :3]
scalar_correction = adj_x[0, :, 3:]
x_deform = x_in + deformation
if self.embed_fn is not None:
x_deform = self.embed_fn(x_deform)
x = x_deform
for l in range(0, self.num_layers - 1):
lin = getattr(self, "lin" + str(l))
if l in self.skip_in:
x = torch.cat([x, x_deform], 1) / np.sqrt(2)
x = lin(x)
if l < self.num_layers - 2:
x = self.softplus(x)
if deform:
x[:, :1] += scalar_correction
if ret_deforms:
return x, deformation, scalar_correction
return x
def gradient(self, x, hypo_params, latent_code, deform):
x.requires_grad_(True)
y = self.forward(x, hypo_params, latent_code, deform=deform)[:, :1]
d_output = torch.ones_like(y, requires_grad=False, device=y.device)
gradients = torch.autograd.grad(
outputs=y,
inputs=x,
grad_outputs=d_output,
create_graph=True,
retain_graph=True,
only_inputs=True)[0]
return gradients.unsqueeze(1)
class RenderingNetwork(nn.Module):
def __init__(
self,
feature_vector_size,
latent_vector_size,
mode,
d_in,
d_out,
dims,
weight_norm=True,
multires_view=0
):
super().__init__()
self.mode = mode
dims = [d_in + feature_vector_size + latent_vector_size] + dims + [d_out]
self.embedview_fn = None
if multires_view > 0:
embedview_fn, input_ch = get_embedder(multires_view)
self.embedview_fn = embedview_fn
dims[0] += (input_ch - 3)
self.num_layers = len(dims)
for l in range(0, self.num_layers - 1):
out_dim = dims[l + 1]
lin = nn.Linear(dims[l], out_dim)
if weight_norm:
lin = nn.utils.weight_norm(lin)
setattr(self, "lin" + str(l), lin)
self.relu = nn.ReLU()
self.tanh = nn.Tanh()
def forward(self, points, normals, view_dirs, feature_vectors, latent_code):
if self.embedview_fn is not None:
view_dirs = self.embedview_fn(view_dirs)
if self.mode == 'idr':
rendering_input = torch.cat([points, view_dirs, normals, feature_vectors], dim=-1)
elif self.mode == 'no_view_dir':
rendering_input = torch.cat([points, normals, feature_vectors], dim=-1)
elif self.mode == 'no_normal':
rendering_input = torch.cat([points, view_dirs, feature_vectors], dim=-1)
x = add_latent(rendering_input, latent_code)
for l in range(0, self.num_layers - 1):
lin = getattr(self, "lin" + str(l))
x = lin(x)
if l < self.num_layers - 2:
x = self.relu(x)
x = self.tanh(x)
return x
class IDRNetwork(nn.Module):
def __init__(self, conf):
super().__init__()
self.feature_vector_size = conf.get_int('feature_vector_size')
latent_code_dim = conf.get_int('latent_vector_size')
implicit_conf = conf.get_config('implicit_network')
implicit_conf["latent_code_size"] = latent_code_dim
rendering_conf = conf.get_config('rendering_network')
rendering_conf["latent_vector_size"] = latent_code_dim
self.should_deform = conf["deform"]
# Deform-Net
deform_config = conf.get_config("deform_network")
self.deform_net = dif_modules.SingleBVPNet(mode='mlp', in_features=3, out_features=4, **deform_config)
self.base_deform_reg = deform_config["base_reg_strength"]
self.deform_reg_decay = deform_config["reg_decay"]
self.deform_reg_strength = self.base_deform_reg
self.implicit_network = ImplicitNetwork(self.feature_vector_size, self.deform_net, **implicit_conf)
self.rendering_network = RenderingNetwork(self.feature_vector_size, **rendering_conf)
hyper_config = conf.get_config("hyper_network")
# Hyper-Net
self.hyper_net = HyperNetwork(hyper_in_features=latent_code_dim,
hypo_module=self.deform_net, **hyper_config)
self.hyper_reg_strength = 3
self.ray_tracer = RayTracing(**conf.get_config('ray_tracer'))
self.sample_network = SampleNetwork()
self.object_bounding_sphere = conf.get_float('ray_tracer.object_bounding_sphere')
def forward(self, input):
# Parse model input
intrinsics = input["intrinsics"]
uv = input["uv"]
pose = input["pose"]
object_mask = input["object_mask"].reshape(-1)
latent_code = input["obj"]
ray_dirs, cam_loc = rend_util.get_camera_params(uv, pose, intrinsics)
batch_size, num_pixels, _ = ray_dirs.shape
hypo_params = self.hyper_net(latent_code)
self.implicit_network.eval()
with torch.no_grad():
points, network_object_mask, dists = self.ray_tracer(
sdf=lambda x: self.implicit_network(x, hypo_params, latent_code, self.should_deform)[:, 0],
cam_loc=cam_loc,
object_mask=object_mask,
ray_directions=ray_dirs)
self.implicit_network.train()
points = (cam_loc.unsqueeze(1) + dists.reshape(batch_size, num_pixels, 1) * ray_dirs).reshape(-1, 3)
sdf_output, deformations, scalar_correction = \
self.implicit_network.forward(points, hypo_params, latent_code, self.should_deform, ret_deforms=True)
sdf_output = sdf_output[:, 0:1]
ray_dirs = ray_dirs.reshape(-1, 3)
if self.training:
surface_mask = network_object_mask & object_mask
surface_points = points[surface_mask]
surface_dists = dists[surface_mask].unsqueeze(-1)
surface_ray_dirs = ray_dirs[surface_mask]
surface_cam_loc = cam_loc.unsqueeze(1).repeat(1, num_pixels, 1).reshape(-1, 3)[surface_mask]
surface_output = sdf_output[surface_mask]
N = surface_points.shape[0]
# Sample points for the eikonal loss
eik_bounding_box = self.object_bounding_sphere
n_eik_points = batch_size * num_pixels // 2
eikonal_points = torch.empty(n_eik_points, 3).uniform_(-eik_bounding_box, eik_bounding_box).cuda()
eikonal_pixel_points = points.clone()
eikonal_pixel_points = eikonal_pixel_points.detach()
eikonal_points = torch.cat([eikonal_points, eikonal_pixel_points], 0)
points_all = torch.cat([surface_points, eikonal_points], dim=0)
output = self.implicit_network(surface_points, hypo_params, latent_code, self.should_deform)
surface_sdf_values = output[:N, 0:1].detach()
g = self.implicit_network.gradient(points_all, hypo_params, latent_code, self.should_deform)
surface_points_grad = g[:N, 0, :].clone().detach()
# For eikonal loss. Don't include deformations.
grad_theta = self.implicit_network.gradient(points_all, hypo_params, latent_code, False)[N:, 0, :]
differentiable_surface_points = self.sample_network(surface_output,
surface_sdf_values,
surface_points_grad,
surface_dists,
surface_cam_loc,
surface_ray_dirs)
else:
surface_mask = network_object_mask
differentiable_surface_points = points[surface_mask]
grad_theta = None
view = -ray_dirs[surface_mask]
rgb_values = torch.ones_like(points).float().cuda()
if differentiable_surface_points.shape[0] > 0:
rgb_values[surface_mask] = self.get_rbg_value(differentiable_surface_points, view, latent_code)
output = {
'points': points,
'rgb_values': rgb_values,
'sdf_output': sdf_output,
'network_object_mask': network_object_mask,
'object_mask': object_mask,
'grad_theta': grad_theta,
'deformation': deformations,
'correction': scalar_correction
}
return output
def get_rbg_value(self, points, view_dirs, latent_code):
hypo_params = self.hyper_net(latent_code)
output = self.implicit_network(points, hypo_params, latent_code, self.should_deform)
# Do not deform here to avoid normals issues
g = self.implicit_network.gradient(points, hypo_params, latent_code, deform=False)
normals = torch.zeros(g[:, 0, :].shape).cuda()
v_dirs = torch.zeros(view_dirs.shape).cuda()
feature_vectors = output[:, 1:]
rgb_vals = self.rendering_network(points, normals, v_dirs, feature_vectors, latent_code)
return rgb_vals
|
py | b400c8f9988ffd3a92adcbd770f1683fda42504f | ##############################################################
# How To Use The Message Widget For Blocks of Text
#############################################################
from tkinter import *
root = Tk()
root.title("Python Tkinter How To Use The Message Widget For Blocks of Text")
root.iconbitmap("Python Tkinter How To Use The Message Widget For Blocks of Text/icons/hulk.ico")
root.geometry("500x800")
def change():
    my_message.config(text="And now for something completely different!")
# First One
frame1 = LabelFrame(root, text="Left Justified")
frame1.pack(pady=20)
my_message = Message(frame1, text="This is some \n long text I am typing so that we can look at it, doesn't it look long",
font=("Helvetica", 18),
aspect=150,
justify=LEFT)
my_message.pack(pady=10, padx=10)
# Second One
frame2 = LabelFrame(root, text="Right Justified")
frame2.pack(pady=20)
my_message2 = Message(frame2, text="This is some \n long text I am typing so that we can look at it, doesn't it look long",
font=("Helvetica", 18),
aspect=150,
justify=RIGHT)
my_message2.pack(pady=10, padx=10)
# Third One
frame3 = LabelFrame(root, text="Center Justified")
frame3.pack(pady=20)
my_message3 = Message(frame3, text="This is some \n long text I am typing so that we can look at it, doesn't it look long",
font=("Helvetica", 18),
aspect=150,
justify=CENTER)
my_message3.pack(pady=10, padx=10)
# Button
my_button = Button(root, text="Change Text", command=change)
my_button.pack(pady=20)
root.mainloop() |
py | b400c90384fefba71f344130bcb05dc8b72de32b | """
The Purpose of the RoibalBot Python Program is to create an automated trading bot (functionality) on Binance
Utilized Python-Binance ( https://github.com/sammchardy/python-binance )
Advanced-Version capable of all exchanges, all coins (using ccxt)
Created 4/14/2018 by Joaquin Roibal
V 0.01 - Updated 4/20/2018
v 0.02 - Updated 5/30/2018 - Converted to Advanced Version: https://github.com/Roibal/Cryptocurrency-Trading-Bots-Python-Beginner-Advance
Licensed under MIT License
Instructional Youtube Video: https://www.youtube.com/watch?v=8AAN03M8QhA
Did you enjoy the functionality of this bot? Tips always appreciated.
BTC:
ETH:
NOTE: All Subsequent Version of Program must contain this message, unmodified, in it's entirety
Copyright (c) 2018 by Joaquin Roibal
"""
from binance.client import Client
import time
import matplotlib
from matplotlib import cm
import matplotlib.pyplot as plt
from binance.enums import *
import save_historical_data_Roibal
from BinanceKeys import BinanceKey1
api_key = BinanceKey1['api_key']
api_secret = BinanceKey1['api_secret']
client = Client(api_key, api_secret)
# get a deposit address for BTC
address = client.get_deposit_address(asset='BTC')
def run():
# get system status
#Create List of Crypto Pairs to Watch
list_of_symbols = ['BTCUSDT', 'ETHUSDT', 'BNBUSDT','BNBBTC', 'ETHBTC', 'LTCBTC']
micro_cap_coins = ['ICXBNB', 'BRDBNB', 'NAVBNB', 'RCNBNB']
#time_horizon = "Short"
#Risk = "High"
print("\n\n---------------------------------------------------------\n\n")
print("Hello and Welcome to the Crypto Trader Bot Python Script\nCreated 2018 by Joaquin Roibal (@BlockchainEng)")
print("A quick 'run-through' will be performed to introduce you to the functionality of this bot")
print("To learn more visit medium.com/@BlockchainEng or watch introductory Youtube Videos")
time.sleep(5)
try:
#Example Visualizations of Coins
save_historical_data_Roibal.save_historic_klines_csv('BTCUSDT', "1 hours ago UTC", "now UTC", Client.KLINE_INTERVAL_1MINUTE)
save_historical_data_Roibal.save_historic_klines_csv('ETHBTC', "6 months ago UTC", "now UTC", Client.KLINE_INTERVAL_1DAY)
save_historical_data_Roibal.save_historic_klines_csv('BRDBNB', "8 hours ago UTC", "now UTC", Client.KLINE_INTERVAL_3MINUTE)
save_historical_data_Roibal.save_historic_klines_csv('BTCUSDT', "12 months ago UTC", "now UTC", Client.KLINE_INTERVAL_1WEEK)
save_historical_data_Roibal.save_historic_klines_csv('ETHUSDT', "8 hours ago UTC", "now UTC", Client.KLINE_INTERVAL_15MINUTE)
#Visualize All Micro Cap Coins for 8 hour period and 3 minute Candlestick
for coin in micro_cap_coins:
save_historical_data_Roibal.save_historic_klines_csv(coin, "8 hours ago UTC", "now UTC", Client.KLINE_INTERVAL_3MINUTE)
save_historical_data_Roibal.save_historic_klines_csv(coin, "24 hours ago UTC", "now UTC", Client.KLINE_INTERVAL_15MINUTE)
save_historical_data_Roibal.save_historic_klines_csv(coin, "1 month ago UTC", "now UTC", Client.KLINE_INTERVAL_1DAY)
    except Exception:
pass
#Get Status of Exchange & Account
try:
status = client.get_system_status()
print("\nExchange Status: ", status)
#Account Withdrawal History Info
withdraws = client.get_withdraw_history()
print("\nClient Withdraw History: ", withdraws)
#get Exchange Info
info = client.get_exchange_info()
print("\nExchange Info (Limits): ", info)
    except Exception:
pass
# place a test market buy order, to place an actual order use the create_order function
# if '1000 ms ahead of server time' error encountered, visit https://github.com/sammchardy/python-binance/issues/249
try:
order = client.create_test_order(
symbol='BNBBTC',
side=Client.SIDE_BUY,
type=Client.ORDER_TYPE_MARKET,
quantity=100)
except:
print("\n \n \nATTENTION: NON-VALID CONNECTION WITH BINANCE \n \n \n")
#Get Info about Coins in Watch List
coin_prices(list_of_symbols)
coin_tickers(list_of_symbols)
#for symbol in list_of_symbols:
# market_depth(symbol)
#for coin in micro_cap_coins:
# visualize_market_depth(1, 1, coin)
for coin in micro_cap_coins:
scalping_orders(coin, 1, 1)
#get recent trades
trades = client.get_recent_trades(symbol='BNBBTC')
print("\nRecent Trades: ", trades)
print("Local Time: ", time.localtime())
print("Recent Trades Time: ", convert_time_binance(trades[0]['time']))
#get historical trades
try:
hist_trades = client.get_historical_trades(symbol='BNBBTC')
print("\nHistorical Trades: ", hist_trades)
except:
print('\n \n \nATTENTION: NON VALID CONNECTION WITH BINANCE \n \n \n')
#get aggregate trades
agg_trades = client.get_aggregate_trades(symbol='BNBBTC')
print("\nAggregate Trades: ", agg_trades)
def convert_time_binance(gt):
#Converts from Binance Time Format (milliseconds) to time-struct
#From Binance-Trader Comment Section Code
#gt = client.get_server_time()
print("Binance Time: ", gt)
print(time.localtime())
aa = str(gt)
bb = aa.replace("{'serverTime': ","")
aa = bb.replace("}","")
gg=int(aa)
ff=gg-10799260
uu=ff/1000
yy=int(uu)
tt=time.localtime(yy)
#print(tt)
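    # Simpler sketch of the same conversion (assuming gt is the dict returned by
    # client.get_server_time(), and ignoring the fixed millisecond offset applied above):
    #   tt = time.localtime(gt['serverTime'] / 1000)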
return tt
def market_depth(sym, num_entries=20):
#Get market depth
#Retrieve and format market depth (order book) including time-stamp
i=0 #Used as a counter for number of entries
print("Order Book: ", convert_time_binance(client.get_server_time()))
depth = client.get_order_book(symbol=sym)
print(depth)
print(depth['asks'][0])
ask_tot=0.0
ask_price =[]
ask_quantity = []
bid_price = []
bid_quantity = []
bid_tot = 0.0
place_order_ask_price = 0
place_order_bid_price = 0
max_order_ask = 0
max_order_bid = 0
print("\n", sym, "\nDepth ASKS:\n")
print("Price Amount")
for ask in depth['asks']:
if i<num_entries:
if float(ask[1])>float(max_order_ask):
#Determine Price to place ask order based on highest volume
max_order_ask=ask[1]
place_order_ask_price=round(float(ask[0]),5)-0.0001
#ask_list.append([ask[0], ask[1]])
ask_price.append(float(ask[0]))
ask_tot+=float(ask[1])
ask_quantity.append(ask_tot)
#print(ask)
i+=1
j=0 #Secondary Counter for Bids
print("\n", sym, "\nDepth BIDS:\n")
print("Price Amount")
for bid in depth['bids']:
if j<num_entries:
if float(bid[1])>float(max_order_bid):
#Determine Price to place ask order based on highest volume
max_order_bid=bid[1]
place_order_bid_price=round(float(bid[0]),5)+0.0001
bid_price.append(float(bid[0]))
bid_tot += float(bid[1])
bid_quantity.append(bid_tot)
#print(bid)
j+=1
return ask_price, ask_quantity, bid_price, bid_quantity, place_order_ask_price, place_order_bid_price
#Plot Data
def scalping_orders(coin, wait=1, tot_time=1):
#Function for placing 'scalp orders'
#Calls on Visualizing Scalping Orders Function
ap, aq, bp, bq, place_ask_order, place_bid_order, spread, proj_spread, max_bid, min_ask = visualize_market_depth(wait, tot_time, coin)
print("Coin: {}\nPrice to Place Ask Order: {}\nPrice to place Bid Order: {}".format(coin, place_ask_order, place_bid_order))
print("Spread: {} % Projected Spread {} %".format(spread, proj_spread))
print("Max Bid: {} Min Ask: {}".format(max_bid, min_ask))
#Place Orders based on calculated bid-ask orders if projected > 0.05% (transaction fee)
#Documentation: http://python-binance.readthedocs.io/en/latest/account.html#orders
"""
if proj_spread > 0.05:
quant1=100 #Determine Code Required to calculate 'minimum' quantity
#Place Bid Order:
bid_order1 = client.order_limit_buy(
symbol=coin,
quantity=quant1,
price=place_bid_order)
#Place Ask Order
ask_order1 = client.order_limit_sell(
symbol=coin,
quantity=quant1,
price=place_ask_order)
#Place second order if current spread > 0.05% (transaction fee)
"""
def visualize_market_depth(wait_time_sec='1', tot_time='1', sym='ICXBNB', precision=5):
cycles = int(tot_time)/int(wait_time_sec)
start_time = time.asctime()
fig, ax = plt.subplots()
for i in range(1,int(cycles)+1):
ask_pri, ask_quan, bid_pri, bid_quan, ask_order, bid_order = market_depth(sym)
#print(ask_price)
plt.plot(ask_pri, ask_quan, color = 'red', label='asks-cycle: {}'.format(i))
plt.plot(bid_pri, bid_quan, color = 'blue', label = 'bids-cycle: {}'.format(i))
#ax.plot(depth['bids'][0], depth['bids'][1])
max_bid = max(bid_pri)
min_ask = min(ask_pri)
max_quant = max(ask_quan[-1], bid_quan[-1])
spread = round(((min_ask-max_bid)/min_ask)*100,5) #Spread based on market
proj_order_spread = round(((ask_order-bid_order)/ask_order)*100, precision)
price=round(((max_bid+min_ask)/2), precision)
plt.plot([price, price],[0, max_quant], color = 'green', label = 'Price - Cycle: {}'.format(i)) #Vertical Line for Price
plt.plot([ask_order, ask_order],[0, max_quant], color = 'black', label = 'Ask - Cycle: {}'.format(i))
plt.plot([bid_order, bid_order],[0, max_quant], color = 'black', label = 'Buy - Cycle: {}'.format(i))
#plt.plot([min_ask, min_ask],[0, max_quant], color = 'grey', label = 'Min Ask - Cycle: {}'.format(i))
#plt.plot([max_bid, max_bid],[0, max_quant], color = 'grey', label = 'Max Buy - Cycle: {}'.format(i))
ax.annotate("Max Bid: {} \nMin Ask: {}\nSpread: {} %\nCycle: {}\nPrice: {}"
"\nPlace Bid: {} \nPlace Ask: {}\n Projected Spread: {} %".format(max_bid, min_ask, spread, i, price, bid_order, ask_order, proj_order_spread),
xy=(max_bid, ask_quan[-1]), xytext=(max_bid, ask_quan[0]))
        if i == int(cycles):  # last cycle: no need to sleep again
break
else:
time.sleep(int(wait_time_sec))
#end_time = time.asctime()
ax.set(xlabel='Price', ylabel='Quantity',
title='Binance Order Book: {} \n {}\n Cycle Time: {} seconds - Num Cycles: {}'.format(sym, start_time, wait_time_sec, cycles))
plt.legend()
plt.show()
return ask_pri, ask_quan, bid_pri, bid_quan, ask_order, bid_order, spread, proj_order_spread, max_bid, min_ask
def coin_prices(watch_list):
#Will print to screen, prices of coins on 'watch list'
#returns all prices
prices = client.get_all_tickers()
print("\nSelected (watch list) Ticker Prices: ")
for price in prices:
if price['symbol'] in watch_list:
print(price)
return prices
def coin_tickers(watch_list):
# Prints to screen tickers for 'watch list' coins
# Returns list of all price tickers
tickers = client.get_orderbook_tickers()
print("\nWatch List Order Tickers: \n")
for tick in tickers:
if tick['symbol'] in watch_list:
print(tick)
return tickers
def portfolio_management(deposit = '10000', withdraw=0, portfolio_amt = '0', portfolio_type='USDT', test_acct='True'):
"""The Portfolio Management Function will be used to track profit/loss of Portfolio in Any Particular Currency (Default: USDT)"""
#Maintain Portfolio Statistics (Total Profit/Loss) in a file
pass
def Bollinger_Bands():
#This Function will calculate Bollinger Bands for Given Time Period
#EDIT: Will use Crypto-Signal for this functionality
#https://github.com/CryptoSignal/crypto-signal
pass
def buy_sell_bot():
pass
def position_sizing():
pass
def trailing_stop_loss():
pass
#Place Limit Order
"""
order = client.order_limit_buy(
symbol='BNBBTC',
quantity=100,
price='0.00001')
order = client.order_limit_sell(
symbol='BNBBTC',
quantity=100,
price='0.00001')
"""
"""
#trade aggregator (generator)
agg_trades = client.aggregate_trade_iter(symbol='ETHBTC', start_str='30 minutes ago UTC')
# iterate over the trade iterator
for trade in agg_trades:
pass
#print(trade)
# do something with the trade data
# convert the iterator to a list
# note: generators can only be iterated over once so we need to call it again
agg_trades = client.aggregate_trade_iter(symbol='ETHBTC', start_str='30 minutes ago UTC')
agg_trade_list = list(agg_trades)
# fetch 30 minute klines for the last month of 2017
klines = client.get_historical_klines("ETHBTC", Client.KLINE_INTERVAL_30MINUTE, "1 Dec, 2017", "1 Jan, 2018")
#for kline in klines:
#print(kline)
"""
#place an order on Binance
"""
order = client.create_order(
symbol='BNBBTC',
side=SIDE_BUY,
type=ORDER_TYPE_LIMIT,
timeInForce=TIME_IN_FORCE_GTC,
quantity=100,
price='0.00001')
"""
if __name__ == "__main__":
run()
|
py | b400c9f2404b2e99af15e4d7bb77ed00f24f8e54 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from gcp_devrel.testing import eventually_consistent
from google.cloud import pubsub_v1
import pytest
import publisher
PROJECT = os.environ['GCLOUD_PROJECT']
TOPIC = 'publisher-test-topic'
@pytest.fixture
def client():
yield pubsub_v1.PublisherClient()
@pytest.fixture
def topic(client):
topic_path = client.topic_path(PROJECT, TOPIC)
try:
client.delete_topic(topic_path)
except:
pass
client.create_topic(topic_path)
yield topic_path
def test_list(client, topic, capsys):
@eventually_consistent.call
def _():
publisher.list_topics(PROJECT)
out, _ = capsys.readouterr()
assert topic in out
def test_create(client):
topic_path = client.topic_path(PROJECT, TOPIC)
try:
client.delete_topic(topic_path)
except:
pass
publisher.create_topic(PROJECT, TOPIC)
@eventually_consistent.call
def _():
assert client.get_topic(topic_path)
def test_delete(client, topic):
publisher.delete_topic(PROJECT, TOPIC)
@eventually_consistent.call
def _():
with pytest.raises(Exception):
client.get_topic(client.topic_path(PROJECT, TOPIC))
def test_publish(topic, capsys):
publisher.publish_messages(PROJECT, TOPIC)
out, _ = capsys.readouterr()
assert 'Published' in out
def test_publish_with_batch_settings(topic, capsys):
publisher.publish_messages_with_batch_settings(PROJECT, TOPIC)
out, _ = capsys.readouterr()
assert 'Published' in out
def test_publish_with_futures(topic, capsys):
publisher.publish_messages_with_futures(PROJECT, TOPIC)
out, _ = capsys.readouterr()
assert 'Published' in out
|
py | b400cad748f448a83aa7cb423fcc898fdfaa2cc4 | def test_ci():
assert (1, 2, 3) == (1, 2, 3)
|
py | b400cb2f9f99546105eaee937c979e78373a75c2 | #!/usr/bin/env python3
import re, os, argparse, uuid
from graphviz import Digraph
from graphviz import Graph as VizGraph
def get_params():
parser = argparse.ArgumentParser()
parser.add_argument('tags_file', type=str, help="Rust tags file")
parser.add_argument("--out", help="Output file",
action="store", default="out-graph.dot")
options = parser.parse_args()
return options
options = get_params()
definition_regex = re.compile(r'^(?P<name>\S+)\s+(?P<sourceFile>\S+)\s+/\^\s*(?P<signature>[\S \t]+)/;\"\s+(?P<type>\S+)\s(?P<metadata>[\s\S]+)')
def dict_from_regex(target, reg):
return [m.groupdict() for m in reg.finditer(target)]
def parse_tags_file(file_path):
return open(file_path).read().splitlines()
lines = parse_tags_file(options.tags_file)
def clear_signature(signature):
signature = signature.replace('{$','').replace('$/;','')
if signature[-1] == '$':
signature = signature.replace(signature[len(signature)-1], ')') if signature[-2] == '(' else signature[:-1]
    signature = signature.translate({ord(c): f"\\{c}" for c in "!@#$%^&*()[]{};:,./<>?\\|`~-=_+"})
return signature
class Graph:
interface_refer="interface:"
implementation_refer="implementation:"
def __init__(self, header, signature, token_type, metadata):
self.header = header
self.node_name = f"{header}{str(uuid.uuid4()).replace('-','')}"
self.signature = signature
self.token_type = token_type
self.metadata = metadata
self.links = list()
self.fields = list()
self.methods = list()
def add_link(self, graph):
self.links.append(graph)
def is_typedef(self):
return self.token_type == 'typedef'
def is_impl(self):
return self.token_type == 'implementation'
def is_method(self):
return self.token_type == 'method'
def is_field(self):
return self.token_type == 'field'
def is_interface(self):
return self.token_type == 'interface'
def is_interface_refer(self,refer):
return f"{self.interface_refer}{refer}" in self.metadata
def is_implementation_refer(self, refer):
return f"{self.implementation_refer}{refer}" in self.metadata
class GraphsHandler:
def __init__(self):
self.all_graphs = list()
self.target_graphs = list()
def __contains(self, graph):
return any(x['name'] == graph['name'] for x in self.target_graphs)
def add(self, graph):
self.all_graphs.append(graph)
def make_structures(self):
# interfaces
interfaces = [g for g in self.all_graphs if g.is_interface()]
implementations = [g for g in self.all_graphs if g.is_impl()]
for interface in interfaces:
interface_fields = [g for g in self.all_graphs if g.is_interface_refer(interface.header)]
# methods
methods = [m for m in interface_fields if m.is_method()]
interface.methods.extend(methods)
# fields
fields = [f for f in interface_fields if not f.is_method()]
interface.fields.extend(fields)
# links
links = [l.node_name for l in implementations if l.header == interface.header or interface.header in l.signature]
interface.links.extend(links)
self.target_graphs.extend(interfaces)
# implementations
for impl in implementations:
impl_fields = [g for g in self.all_graphs if g.is_implementation_refer(impl.header)]
#methods
methods = [m for m in impl_fields if m.is_method()]
impl.methods.extend(methods)
#fields
fields = [f for f in impl_fields if not f.is_method()]
impl.fields.extend(fields)
# links ??
self.target_graphs.extend(implementations)
# struct ??
def draw(self):
s = Digraph('struct', filename=f"{options.out}.gv", node_attr={'shape': 'record'}, engine='dot', format='svg', strict=True)
#s.attr(size='6,6')
for graph in self.target_graphs:
lbl = self.__format_label(graph)
s.node(graph.node_name, label=lbl)
for link in graph.links:
s.edge(graph.node_name,link)
s.view()
def __format_label(self, graph):
label = "{"
label += f"{graph.header}|"
label += "\<\<fields\>\>\:\\n" if len(graph.fields) > 0 else ""
for f in graph.fields:
label += f"{f.signature}\\n"
label += "|" if len(graph.fields) > 0 else ""
label += "\<\<methods\>\>\:\\n" if len(graph.methods) > 0 else ""
for m in graph.methods:
label += f"{m.signature}\\n"
label += "}"
return label
graphs_handler = GraphsHandler()
#indexing
for line in lines:
if line.startswith('!'):
continue
metadata = dict_from_regex(line, definition_regex)[0]
if metadata['name'] == 'main':
continue
g = Graph(metadata['name'],
clear_signature(metadata['signature']),
metadata['type'],
metadata['metadata'])
graphs_handler.add(g)
graphs_handler.make_structures()
graphs_handler.draw()
|
py | b400cc4839d25e70ac66094682ba8a5635701798 | import time
import logging
from doxieautomator.doxie import DoxieAutomator
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(module)s %(name)s.%(funcName)s +%(lineno)s: %(levelname)-8s [%(process)d] %(message)s')
def notify_new_file(local_filename):
logging.info("New file downloaded from Doxie to: %s"%(local_filename))
if __name__ == "__main__":
doxie = DoxieAutomator()
doxie.bind_to(notify_new_file)
try:
if doxie.is_running:
sys.exit("This app is already running!")
while True:
doxie.loop()
time.sleep(30)
finally:
doxie.stop() |
py | b400ccfd708547fb894b570cc110f7ea2d51560e | # Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import io
from command import DEFAULT_LOCAL_JOBS, PagedCommand
class Diff(PagedCommand):
common = True
helpSummary = "Show changes between commit and working tree"
helpUsage = """
%prog [<project>...]
The -u option causes '%prog' to generate diff output with file paths
relative to the repository root, so the output can be applied
to the Unix 'patch' command.
"""
PARALLEL_JOBS = DEFAULT_LOCAL_JOBS
def _Options(self, p):
p.add_option('-u', '--absolute',
dest='absolute', action='store_true',
help='paths are relative to the repository root')
def _ExecuteOne(self, absolute, project):
"""Obtains the diff for a specific project.
Args:
absolute: Paths are relative to the root.
project: Project to get status of.
Returns:
The status of the project.
"""
buf = io.StringIO()
ret = project.PrintWorkTreeDiff(absolute, output_redir=buf)
return (ret, buf.getvalue())
def Execute(self, opt, args):
all_projects = self.GetProjects(args)
def _ProcessResults(_pool, _output, results):
ret = 0
for (state, output) in results:
if output:
print(output, end='')
if not state:
ret = 1
return ret
return self.ExecuteInParallel(
opt.jobs,
functools.partial(self._ExecuteOne, opt.absolute),
all_projects,
callback=_ProcessResults,
ordered=True)
|
py | b400cd427aa419a56cf10c6130786081ef91bcfe | from __future__ import unicode_literals
import os
import re
import subprocess
import sys
import ctypes
from distutils.version import LooseVersion
if sys.version_info[0] >= 3:
from shutil import which
else:
from backports.shutil_which import which
if sys.platform.startswith('win'):
if sys.version_info[0] >= 3:
from winreg import OpenKey, QueryValueEx, HKEY_LOCAL_MACHINE, KEY_READ
else:
from _winreg import OpenKey, QueryValueEx, HKEY_LOCAL_MACHINE, KEY_READ
def read_registry(key, valueex):
reg_key = OpenKey(HKEY_LOCAL_MACHINE, key, 0, KEY_READ)
return QueryValueEx(reg_key, valueex)
def getRhome(path, throw=False):
rhome = ""
if sys.platform.startswith("win") and path and not path.endswith(".exe"):
path = path + ".exe"
if not which(path):
return None
try:
rhome = subprocess.check_output([path, "RHOME"]).decode("utf-8").strip()
except Exception:
rhome = None
return rhome
def verify_Rhome(rhome):
if sys.platform.startswith("win"):
path = os.path.join(rhome, "bin", "x64" if sys.maxsize > 2**32 else "i386", "R.dll")
elif sys.platform == "darwin":
path = os.path.join(rhome, "lib", "libR.dylib")
else:
path = os.path.join(rhome, "lib", "libR.so")
if not os.path.exists(path):
if sys.platform.startswith("win"):
another_path = os.path.join(
rhome, "bin", "i386" if sys.maxsize > 2**32 else "x64", "R.dll")
if os.path.exists(another_path):
raise RuntimeError("R and python architectures do not match.")
raise RuntimeError("R share library ({}) does not exist.".format(path))
def Rhome():
rhome = None
if 'R_BINARY' in os.environ:
rhome = getRhome(os.environ['R_BINARY'], throw=True)
if not rhome:
raise RuntimeError("R binary ({}) does not exist.".format(os.environ['R_BINARY']))
if not rhome and 'R_HOME' in os.environ:
rhome = os.environ['R_HOME']
if not os.path.isdir(rhome):
raise RuntimeError("R_HOME ({}) does not exist.".format(rhome))
return rhome
if not rhome:
rhome = getRhome("R")
try:
if sys.platform.startswith("win") and not rhome:
rhome = read_registry("Software\\R-Core\\R", "InstallPath")[0]
except Exception:
rhome = ""
if rhome:
os.environ['R_HOME'] = rhome
else:
raise RuntimeError("Cannot determine R HOME.")
verify_Rhome(rhome)
return rhome
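# Usage sketch for Rhome()/ensure_path() (illustrative):
#   rhome = Rhome()       # e.g. '/usr/lib/R' on Linux, resolved via R_BINARY/R_HOME/PATH/registry
#   ensure_path(rhome)    # on Windows, prepends R's DLL directory to PATH (see below)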
def ensure_path(rhome=None):
if not rhome:
rhome = Rhome()
if sys.platform.startswith("win"):
libRdir = os.path.join(rhome, "bin", "x64" if sys.maxsize > 2**32 else "i386")
# make sure Rblas.dll can be reached
try:
msvcrt = ctypes.cdll.msvcrt
msvcrt._wgetenv.restype = ctypes.c_wchar_p
path = msvcrt._wgetenv(ctypes.c_wchar_p("PATH"))
if libRdir not in path:
path = libRdir + ";" + path
msvcrt._wputenv(ctypes.c_wchar_p("PATH={}".format(path)))
except Exception as e:
print(e)
pass
def rversion(rhome=None):
if not rhome:
rhome = Rhome()
try:
output = subprocess.check_output(
[os.path.join(rhome, "bin", "R"), "--slave", "-e", "cat(as.character(getRversion()))"],
stderr=subprocess.STDOUT).decode("utf-8").strip()
version = LooseVersion(output)
except Exception:
version = LooseVersion("1000.0.0")
return version
UTFPATTERN = re.compile(b"\x02\xff\xfe(.*?)\x03\xff\xfe", re.S)
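# Note: on Windows, R marks embedded UTF-8 console output with the byte markers
# \x02\xff\xfe ... \x03\xff\xfe when the native locale is not UTF-8; UTFPATTERN
# extracts those spans so rconsole2str() can decode them as UTF-8.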
if sys.version_info[0] >= 3:
DECODE_ERROR_HANDLER = "backslashreplace"
else:
DECODE_ERROR_HANDLER = "replace"
def rconsole2str(buf):
ret = ""
m = UTFPATTERN.search(buf)
while m:
a, b = m.span()
ret += system2utf8(buf[:a]) + m.group(1).decode("utf-8", DECODE_ERROR_HANDLER)
buf = buf[b:]
m = UTFPATTERN.search(buf)
ret += system2utf8(buf)
return ret
if sys.platform == "win32":
"""
The following only works after setlocale in C and
R will initialize it for us. To mimic the behaviour, consider
```
ctypes.cdll.msvcrt.setlocale(0, ctypes.c_char_p("chinese-traditional"))
```
"""
mbtowc = ctypes.cdll.msvcrt.mbtowc
mbtowc.argtypes = [
ctypes.POINTER(ctypes.c_wchar),
ctypes.POINTER(ctypes.c_char),
ctypes.c_size_t]
mbtowc.restype = ctypes.c_int
wctomb = ctypes.cdll.msvcrt.wctomb
wctomb.argtypes = [ctypes.POINTER(ctypes.c_char), ctypes.c_wchar]
wctomb.restype = ctypes.c_int
def system2utf8(buf):
wcbuf = ctypes.create_unicode_buffer(1)
text = ""
while buf:
n = mbtowc(wcbuf, buf, len(buf))
if n <= 0:
break
text += wcbuf[0]
buf = buf[n:]
return text
def utf8tosystem(text):
s = ctypes.create_string_buffer(10)
buf = b""
for c in text:
try:
n = wctomb(s, c)
except Exception:
n = -1
if n > 0:
buf += s[:n]
else:
buf += "\\u{{{}}}".format(hex(ord(c))[2:]).encode("ascii")
return buf
else:
def system2utf8(buf):
return buf.decode("utf-8", DECODE_ERROR_HANDLER)
def utf8tosystem(text):
return text.encode("utf-8", "backslashreplace")
def id_str(x):
return str(id(x))
|
py | b400cd6aff5fc999e1eb54c8a1765855a2059a77 | ''' This file is called from the iOS app.
Specifically, the app calls the statement: cd /home/pi/Home_Remote; python SSHtoHomeRemote.py [args]
The arguments vary for each style of remote. For this example of the LED controller, we use the one-button remote.
As a result, we can have a very basic set of args: 1 0 0 0 0.
Thus the exact statement used from this remote is: cd /home/pi/Home_Remote; python SSHtoHomeRemote.py 1 0 0 0 0'''
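# With the example invocation above, sys.argv[1:] == ['1', '0', '0', '0', '0'],
# so function is '1' and only the power-toggle branch at the bottom runs.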
import sys
import LEDController
#first arg is the function to call
function = sys.argv[1]
#takes up to four additional args for multiple uses (use 0 if not using them)
arg1 = sys.argv[2]
arg2 = sys.argv[3]
arg3 = sys.argv[4]
arg4 = sys.argv[5]
# assuming the LEDController module defines a class of the same name
proxyled = LEDController.LEDController()
if(function == "1"):
proxyled.powerOnOff()
exit()
|
py | b400ce34705e939f1362a3ee1c0d69c583b20efc |
import os
import django_heroku
# https://devcenter.heroku.com/articles/cloudinary
import cloudinary
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# local solution:
# from local_settings import SECRET_KEY_HIDD
from django.utils import timezone
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# key in lynxven/bin/activate
SECRET_KEY = os.environ['SECRET_KEY']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['lynxwasp.herokuapp.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'crispy_forms',
'product_manager_ices.apps.ProductManagerIcesConfig',
'cloudinary',
'users_app.apps.UsersConfig',
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'django.contrib.sites',
'allauth',
'allauth.account',
'allauth.socialaccount',
'rest_auth.registration',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'LynxWasp.urls'
# Rest framwork validations
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
],
# DEFAULT PAGINATION
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 7,
# DEFAULT PERMISSION
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticated',
]
}
# https://django-rest-auth.readthedocs.io/en/latest/faq.html
# REST_AUTH_SERIALIZERS = {
# 'USER_DETAILS_SERIALIZER': 'demo.serializers.UserSerializer'
# }
# crsf token form cookies
CSRF_USE_SESSIONS = False
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'LynxWasp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'structure2.db'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Warsaw'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
CRISPY_TEMPLATE_PACK = "bootstrap4"
LOGIN_REDIRECT_URL = "homepage"
LOGIN_URL = "login"
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = "/media/"
# close session at close page
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# django heroku local
django_heroku.settings(locals())
# When you include the django.contrib.sites to your INSTALLED_APPS and run the command "python manage.py migrate"
# the app automatically creates a object into "django_site" table (with domain name and display name equals to "example.com".
# There is no need to create it by yourself.
SITE_ID = 1
AUTH_USER_MODEL = 'users_app.User'
|
py | b400d141eaef8b80cfe70485a843d11ff7e5279a | # author: Natallia Kokash, [email protected]
# Maps KIEM resources to Neo4j graph
from neo4j import GraphDatabase, Session
from model.publication import Publication
from model.contributor import Contributor
from model.industry_identifier import IndustryIdentifier
from model.reference_bibliographic import Reference
from model.reference_index import IndexReference
from model.publication_external import ExternalPublication
from model.index_external import ExternalIndex
from model.cluster_bibliographic import Cluster
from model.cluster_index import IndexCluster
from model.batch import Batch
from typing import List, Union
import logging
class DBConnector:
def __init__(self, uri: str, user: str, password: str):
self.driver = GraphDatabase.driver(uri, auth=(user, password))
self.logger = logging.getLogger('pdfParser.dbConnector.' + self.__class__.__name__)
self.logger.debug('Created an instance of: %s ', self.__class__.__name__)
    def close(self):
        self.driver.close()
# Delete
def clear_graph(self, session: Session = None):
if session is None:
session = self.driver.session()
cql_delete_relationships = "MATCH (a) -[r] -> () DELETE a, r"
cql_delete_nodes = "MATCH (a) DELETE a"
session.run(cql_delete_relationships)
session.run(cql_delete_nodes)
def delete_node(self, node_uuid: str, session: Session = None):
if session is None:
session = self.driver.session()
cql_delete_nodes = "MATCH (a {UUID: $node_uuid}) DETACH DELETE a"
session.run(cql_delete_nodes, node_uuid=node_uuid)
def delete_empty_nodes(self, node_class: str, session: Session = None):
if session is None:
session = self.driver.session()
cql_delete_nodes = "MATCH (a: {node_class}) WHERE size((a)--())=0 DELETE a".format(node_class=node_class)
session.run(cql_delete_nodes)
def delete_pub(self, pub: Publication, session: Session = None):
if session is None:
session = self.driver.session()
self.logger.debug("# nodes before deleting publication data: %d", self.query_node_count())
self.delete_node(pub.UUID, session)
# Delete disconnected contributors
self.delete_empty_nodes(Contributor.__name__)
# Delete disconnected industry identifiers
# TODO remove obsolete class 'Identifier'
self.delete_empty_nodes("Identifier")
self.delete_empty_nodes(IndustryIdentifier.__name__)
# Delete references
if pub.bib_refs:
for ref in pub.bib_refs:
self.delete_node(ref.UUID, session)
self.delete_empty_nodes(Reference.__name__)
if pub.index_refs:
for ref in pub.index_refs:
self.delete_node(ref.UUID, session)
self.delete_empty_nodes(IndexReference.__name__)
# TODO test if this works as references are connected to clusters
# Delete disconnected external publications and external index references
self.delete_empty_nodes(ExternalPublication.__name__)
self.delete_empty_nodes(ExternalIndex.__name__)
# Delete empty clusters
self.delete_empty_nodes(Cluster.__name__)
self.delete_empty_nodes(IndexCluster.__name__)
self.logger.debug("# nodes after deleting publication data: %d", self.query_node_count())
# Create
# Create publication nodes with relationships
def create_pub(self, pub: Publication, session: Session = None):
if session is None:
session = self.driver.session()
# TODO create or update
self.logger.debug("# nodes before adding publication data: %d", self.query_node_count())
# Create publication
cql_create_pub = """CREATE (:Publication {0})"""
session.run(cql_create_pub.format(pub.serialize()))
# Create industry identifiers
cql_create_id = """CREATE (:IndustryIdentifier {0})"""
cql_create_pub_has_identifier = """MATCH (a:Publication), (b:IndustryIdentifier)
WHERE a.UUID = $pub_uuid AND b.UUID = $id_uuid CREATE (a)-[:HasIdentifier]->(b)"""
for industry_id in pub.identifiers:
session.run(cql_create_id.format(industry_id.serialize()))
session.run(cql_create_pub_has_identifier, pub_uuid=pub.UUID, id_uuid=industry_id.UUID)
# Create contributors: editors or authors
cql_create_contributor = """CREATE (:Contributor {0})"""
cql_create_pub_has_contributor = """MATCH (a:Publication), (b:Contributor)
WHERE a.UUID = $pub_uuid AND b.UUID = $c_uuid CREATE (a)-[:HasContributor {num: $num}]->(b)"""
# Create editors
for idx, editor in enumerate(pub.editors):
session.run(cql_create_contributor.format(editor.serialize()))
session.run(cql_create_pub_has_contributor, pub_uuid=pub.UUID, c_uuid=editor.UUID, num=idx)
# Create authors
for idx, author in enumerate(pub.authors):
session.run(cql_create_contributor.format(author.serialize()))
session.run(cql_create_pub_has_contributor, pub_uuid=pub.UUID, c_uuid=author.UUID, num=idx)
# Create bibliographic references
self.create_bib_refs(pub, session)
# Create index references
self.create_index_refs(pub, session)
self.logger.debug("# nodes after adding publication data: %d", self.query_node_count())
# Create bibliographic references
def create_bib_refs(self, pub: Publication, session: Session = None):
if session is None:
session = self.driver.session()
cql_create_ref = """CREATE (:Reference {0})"""
cql_create_pub_cites_ref = """MATCH (a:Publication), (b:Reference)
WHERE a.UUID = $pub_uuid AND b.UUID = $ref_uuid CREATE (a)-[:Cites]->(b)"""
for ref in pub.bib_refs:
session.run(cql_create_ref.format(ref.serialize()))
session.run(cql_create_pub_cites_ref, pub_uuid=pub.UUID, ref_uuid=ref.UUID)
if ref.refers_to is not None:
for ext_pub in ref.refers_to:
self.create_ext_pub(ext_pub, ref.UUID, session)
# Create index references
def create_index_refs(self, pub: Publication, session: Session = None):
if session is None:
session = self.driver.session()
cql_create_idx = """CREATE (:IndexReference {0})"""
cql_create_pub_includes_idx = """MATCH (a:Publication), (b:IndexReference)
WHERE a.UUID = $pub_uuid AND b.UUID = $ref_uuid CREATE (a)-[:Includes]->(b)"""
for idx in pub.index_refs:
session.run(cql_create_idx.format(idx.serialize()))
session.run(cql_create_pub_includes_idx, pub_uuid=pub.UUID, ref_uuid=idx.UUID)
if idx.refers_to is not None:
for ext_idx in idx.refers_to:
self.create_ext_index(ext_idx, idx.UUID, session)
# Create external publications that disambiguate references
def create_ext_pub(self, ext_pub: ExternalPublication, ref_uuid: str, session: Session = None):
if session is None:
session = self.driver.session()
# TODO check if it already exists?
cql_create_ext_pub = """CREATE (b:ExternalPublication {0})"""
cql_create_ref_refers_to_ext_pub = """MATCH (a:Reference), (b:ExternalPublication)
WHERE a.UUID = $ref_uuid AND b.UUID = $ext_pub_uuid CREATE (a)-[:RefersTo]->(b)"""
session.run(cql_create_ext_pub.format(ext_pub.serialize()))
session.run(cql_create_ref_refers_to_ext_pub, ref_uuid=ref_uuid, ext_pub_uuid=ext_pub.UUID)
# Create external nodes that disambiguate index references
def create_ext_index(self, ext_idx: ExternalIndex, ref_uuid: str, session: Session = None):
if session is None:
session = self.driver.session()
# TODO check if it already exists?
cql_create_ext_idx = """CREATE (b:ExternalIndex {0})"""
cql_create_ref_refers_to_ext_idx = """MATCH (a:IndexReference), (b:ExternalIndex)
WHERE a.UUID = $ref_uuid AND b.UUID = $ext_pub_uuid CREATE (a)-[:RefersTo]->(b)"""
session.run(cql_create_ext_idx.format(ext_idx.serialize()))
session.run(cql_create_ref_refers_to_ext_idx, ref_uuid=ref_uuid, ext_pub_uuid=ext_idx.UUID)
# Create cluster of bibliographic references
def create_cluster(self, cluster: Cluster, session: Session = None):
if session is None:
session = self.driver.session()
# Save only meaningful clusters
if len(cluster.refs) > 1:
cql_create_cluster = """CREATE (b:Cluster {0})"""
session.run(cql_create_cluster.format(cluster.serialize()))
for ref in cluster.refs:
cql_create_ref_belongs_to_cluster = """MATCH (a:Reference), (b:Cluster)
WHERE a.UUID = $ref_uuid AND b.UUID = $cluster_uuid CREATE (a)-[:BelongsTo]->(b)"""
session.run(cql_create_ref_belongs_to_cluster, ref_uuid=ref.UUID, cluster_uuid=cluster.UUID)
# Create cluster of index references
def create_index_cluster(self, cluster: IndexCluster, session: Session = None):
if session is None:
session = self.driver.session()
# Save only meaningful clusters
if len(cluster.refs) > 1:
cql_create_cluster = """CREATE (b:IndexCluster {0})"""
session.run(cql_create_cluster.format(cluster.serialize()))
for ref in cluster.refs:
cql_create_ref_belongs_to_cluster = """MATCH (a:IndexReference), (b:IndexCluster)
WHERE a.UUID = $ref_uuid AND b.UUID = $cluster_uuid CREATE (a)-[:BelongsTo]->(b)"""
session.run(cql_create_ref_belongs_to_cluster, ref_uuid=ref.UUID, cluster_uuid=cluster.UUID)
# Create batch clusters
def create_clusters(self, batch: Batch, session: Session = None):
if session is None:
session = self.driver.session()
# Add bibliographic clusters
if batch.cluster_set_bib and batch.cluster_set_bib.clusters:
for cluster in batch.cluster_set_bib.clusters:
self.create_cluster(cluster, session)
# Add index clusters
if batch.cluster_set_index and batch.cluster_set_index.clusters:
for cluster in batch.cluster_set_index.clusters:
self.create_cluster(cluster, session)
# Create knowledge graph
def create_graph(self, batch: Batch):
with self.driver.session() as session:
# Create publications
if batch.publications:
for pub in batch.publications:
try:
self.create_pub(pub, session)
                    except Exception:
                        self.logger.error("Failed to serialize publication: %s", pub.UUID)
self.create_clusters(batch, session)
# Query
# Retrieve all publications
def query_pubs(self, limit: int = None) -> List[Publication]:
pubs = []
cql_pubs = "MATCH (a:Publication) return a"
if limit is not None:
cql_pubs += " limit " + str(limit)
with self.driver.session() as session:
nodes = session.run(cql_pubs)
db_pubs = [record for record in nodes.data()]
for db_pub in db_pubs:
pubs.append(Publication.deserialize(db_pub["a"]))
return pubs
# Find node by uuid
def query_pub(self, node_uuid: str) -> Union[Publication, None]:
cql_pubs = "MATCH (a) where a.UUID=$node_uuid return a"
with self.driver.session() as session:
nodes = session.run(cql_pubs, node_uuid=node_uuid)
db_pubs = [record for record in nodes.data()]
if len(db_pubs) > 0:
return Publication.deserialize(db_pubs[0]["a"])
return None
# Find publication by zip_path
def query_pub_by_zip(self, zip_path: str) -> Union[Publication, None]:
cql_pubs = "MATCH (a:Publication) where a.zip_path=$zip_path return a"
with self.driver.session() as session:
nodes = session.run(cql_pubs, zip_path=zip_path)
db_pubs = [record for record in nodes.data()]
if len(db_pubs) > 0:
return Publication.deserialize(db_pubs[0]["a"])
return None
# Retrieve bibliographic references for a publication
def query_pub_bib_refs(self, pub_uuid: str) -> List[Reference]:
refs = []
cql_pub_cites_ref = "MATCH (a:Publication)-[r:Cites]->(b:Reference) WHERE a.UUID = $pub_uuid return b"
with self.driver.session() as session:
nodes = session.run(cql_pub_cites_ref, pub_uuid=pub_uuid)
db_refs = [record for record in nodes.data()]
for db_ref in db_refs:
refs.append(Reference.deserialize(db_ref["b"]))
return refs
# Retrieve index references for a publication
def query_pub_index_refs(self, pub_uuid: str) -> List[IndexReference]:
refs = []
cql_pub_incl_idx = "MATCH (a:Publication)-[r:Includes]->(b:IndexReference) WHERE a.UUID = $pub_uuid return b"
with self.driver.session() as session:
nodes = session.run(cql_pub_incl_idx, pub_uuid=pub_uuid)
db_refs = [record for record in nodes.data()]
for db_ref in db_refs:
refs.append(IndexReference.deserialize(db_ref["b"]))
return refs
# Retrieve publication identifiers
def query_pub_identifiers(self, pub_uuid: str) -> List[IndustryIdentifier]:
identifiers = []
cql_pub_incl_idx = "MATCH (a:Publication)-[r:HasIdentifier]->(b:IndustryIdentifier) WHERE a.UUID = $pub_uuid return b"
with self.driver.session() as session:
nodes = session.run(cql_pub_incl_idx, pub_uuid=pub_uuid)
db_refs = [record for record in nodes.data()]
for db_ref in db_refs:
identifiers.append(IndustryIdentifier.deserialize(db_ref["b"]))
return identifiers
# Retrieve publication contributors
def query_pub_contributors(self, pub_uuid: str) -> List[Contributor]:
contributors = []
cql_pub_incl_idx = "MATCH (a:Publication)-[r:HasContributor]->(b:Contributor) WHERE a.UUID = $pub_uuid return b"
with self.driver.session() as session:
nodes = session.run(cql_pub_incl_idx, pub_uuid=pub_uuid)
db_refs = [record for record in nodes.data()]
for db_ref in db_refs:
contributors.append(Contributor.deserialize(db_ref["b"]))
return contributors
# Retrieve publication with all relationships
def query_pub_full(self, node_uuid: str) -> Union[Publication, None]:
pub = self.query_pub(node_uuid)
if pub is not None:
# industry identifiers
pub.identifiers = self.query_pub_identifiers(pub.UUID)
# contributors
contributors = self.query_pub_contributors(pub.UUID)
pub.authors = []
pub.editors = []
for contributor in contributors:
if "author" in contributor.type:
pub.authors.append(contributor)
else:
if "editor" in contributor.type:
pub.editors.append(contributor)
# bibliographic references
pub.bib_refs = self.query_pub_bib_refs(pub.UUID)
# index references
pub.index_refs = self.query_pub_index_refs(pub.UUID)
return pub
return None
def process_base_refs(self, cql_refs: str, cls, limit: int = None):
refs = []
if limit:
cql_refs += " limit " + str(limit)
with self.driver.session() as session:
nodes = session.run(cql_refs)
db_refs = [record for record in nodes.data()]
for db_ref in db_refs:
refs.append(cls.deserialize(db_ref["a"]))
return refs
# Retrieve all references
def query_bib_refs(self, limit: int = None) -> List[Reference]:
cql_refs = "MATCH (a:Reference) return a"
return self.process_base_refs(cql_refs, Reference, limit)
# Retrieve all index references
def query_index_refs(self, limit: int = None) -> List[IndexReference]:
cql_refs = "MATCH (a:IndexReference) return a"
return self.process_base_refs(cql_refs, IndexReference, limit)
# Retrieve bibliographic references for a cluster
def query_cluster_bib_refs(self, cluster_uuid: str) -> List[Reference]:
refs = []
cql_pub_cites_ref = "MATCH (a:Reference)-[r:BelongsTo]->(b:Cluster) WHERE b.UUID = $cluster_uuid return a"
with self.driver.session() as session:
nodes = session.run(cql_pub_cites_ref, cluster_uuid=cluster_uuid)
db_refs = [record for record in nodes.data()]
for db_ref in db_refs:
refs.append(Reference.deserialize(db_ref["a"]))
return refs
# Retrieve bibliographic references for a cluster
def query_cluster_index_refs(self, cluster_uuid: str) -> List[IndexReference]:
refs = []
cql_pub_cites_ref = "MATCH (a:IndexReference)-[r:BelongsTo]->(b:IndexCluster) WHERE b.UUID = $cluster_uuid return a"
with self.driver.session() as session:
nodes = session.run(cql_pub_cites_ref, cluster_uuid=cluster_uuid)
db_refs = [record for record in nodes.data()]
for db_ref in db_refs:
refs.append(IndexReference.deserialize(db_ref["a"]))
return refs
# Retrieve all bibliographic clusters
def query_clusters(self, limit: int = None) -> List[Cluster]:
clusters = []
cql_refs = "MATCH (a:Cluster) return a"
if limit:
cql_refs += " limit " + str(limit)
with self.driver.session() as session:
nodes = session.run(cql_refs)
db_clusters = [record for record in nodes.data()]
for db_cluster in db_clusters:
refs = self.query_cluster_bib_refs(db_cluster["a"]["UUID"])
clusters.append(Cluster.deserialize(db_cluster["a"], refs))
return clusters
# Retrieve all bibliographic clusters
def query_index_clusters(self, limit: int = None) -> List[IndexCluster]:
clusters = []
cql_refs = "MATCH (a:IndexCluster) return a"
if limit:
cql_refs += " limit " + str(limit)
with self.driver.session() as session:
nodes = session.run(cql_refs)
db_clusters = [record for record in nodes.data()]
for db_cluster in db_clusters:
refs = self.query_cluster_index_refs(db_cluster["a"]["UUID"])
clusters.append(IndexCluster.deserialize(db_cluster["a"], refs))
return clusters
def query_node_count(self) -> int:
cql_count = "MATCH (a) return count(a) as node_count"
with self.driver.session() as session:
res = session.run(cql_count)
entries = [record for record in res.data()]
return entries[0]['node_count']
def query_rel_count(self) -> int:
cql_count = "MATCH ()-->() RETURN COUNT(*) AS rel_count"
with self.driver.session() as session:
res = session.run(cql_count)
entries = [record for record in res.data()]
return entries[0]['rel_count']
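# A minimal usage sketch (assumptions: `db` is an instance of the storage class
# defined above with an open Neo4j driver, and the UUID below is a placeholder).
def _example_queries(db):
    pub = db.query_pub_full("00000000-0000-0000-0000-000000000000")
    if pub is not None:
        print(pub.UUID, len(pub.bib_refs), len(pub.index_refs))
    print("nodes:", db.query_node_count(), "relationships:", db.query_rel_count())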
|
py | b400d17db5eed6bb7da6c1efe650469421da31cf | import sys
import re
from os.path import join
from os.path import dirname
from typing import Tuple
from typing import Any
from typing import Optional
from typing import Dict
from typing import List
from .http.http_handler import HttpRequestHandler
from .core.route_converter import RouteConverter
from .core.threaded_server import ThreadedServer
from .core.route_discovery import RouteDiscovery
from .renderers.jinja2_renderer import Jinja2Renderer
from .renderers.base_renderer import BaseRenderer
class PyTerrier():
def __init__(
self,
hostname: Optional[str]='localhost',
port: Optional[int]=8000,
template_dir: Optional[str]='templates',
static_files: Optional[str]='static',
renderer: Optional[BaseRenderer]=Jinja2Renderer) -> None:
"""
Create a new PyTerrier application
:Parameters:
- `hostname`: The hostname on which the server will be created.
- `port`: The port on which the server will listen for connections.
- `template_dir`: Folder where to find the site templates.
- `static_files`: Folder that will contain all static files, images,
stylesheets, fonts.
- `renderer`: Specify the default template engine that will be used
by the framework.
"""
if not issubclass(renderer, BaseRenderer):
error_msg = ('The parameter `renderer` needs to be a subclass of '
'pyterrier.renderers.BaseTemplateRenderer')
raise TypeError(error_msg)
self._hostname = hostname
self._port = port
self._template_dir = join(dirname(sys.argv[0]), template_dir)
self._static_files = join(dirname(sys.argv[0]), static_files)
self._route_discovery = RouteDiscovery()
self.route_converter = RouteConverter()
self._route_table: Dict[str, Tuple[str, Any]] = {}
self._renderer = renderer(self._template_dir)
def _print_config(self) -> None:
""" Print the server information. """
print(f'Server started at http://{self._hostname}:{self._port}')
print(f'=> template_dir: {self._template_dir}')
print(f'=> static_dir: {self._static_files}')
def run(self) -> None:
"""
Start the server and listen on the specified port
for new connections.
"""
options = {
'templates': self._template_dir,
'staticfiles': self._static_files
}
def _handler(*args):
return HttpRequestHandler(
self._route_table,
options,
self._renderer,
*args
)
self._print_config()
self._server = ThreadedServer((self._hostname, self._port), _handler)
try:
self._server.serve_forever()
except KeyboardInterrupt:
print('\nStopping server. Bye!')
def init_routes(self, prefix_routes: Optional[bool]=False) -> None:
"""
The init_routes function will get all routes and actions that have been
created in files in the controllers folder and register them within the
PyTerrier route table.
:Parameters:
- `prefix_routes`: Tell the framework to prefix the route with the
name of the controller.
.. Notes:: `controllers` are defined in the controllers directory in
the application's root directory. For instance, if the application
has a controller named `userController.py` and for this controller
there's an action defined with the route /get/{id:int}, if `init_routes`
is called with the parameter `prefix_routes` set to `True`, the action
will be registered as /user/get/{id:int}
:Usage:
app = PyTerrier()
app.init_routes()
"""
self._route_discovery.register_actions(prefix_routes)
for route in self._route_discovery.actions:
self._register_route(*route)
def _register_route(
self,
route: str,
default_method: str,
func,
additional_methods: List[str]=[]):
"""
Register a new route.
:Parameters:
- `route`: the route definition
- `default_method`: the HTTP verb that the action will respond to.
- `func`: the function that will be invoked when the route is
accessed.
.. Note:: Duplicated routes will be overwritten.
"""
func.__setattr__('request', None)
action = func.__get__(func, type(func))
uri_regex = self.route_converter.convert(route)
compiled_uri_regex = re.compile(uri_regex)
methods = [default_method] + additional_methods
for method in methods:
if self._route_table.get(method, None):
self._route_table[method].append((compiled_uri_regex, action))
else:
self._route_table[method] = [(compiled_uri_regex, action)]
def get(self, route: str, additional_methods: List[str]=[]):
"""
Decorator for GET actions.
:Parameters:
- `route`: the URL where the decorated function (action)
can be invoked.
.. Note:: This decorator has the same functionality as the decorator
@get in the pyterrier.http module; the main difference is that this
decorator is meant to be used when defining actions in the same file
where the instance of PyTerrier is created. If you intend to define
the actions in files in the `controllers` folder use pyterrier.http.get
instead.
.. Usage::
@app.get('/api/get')
def get(self):
...
"""
return lambda func: self._register_route(
route, 'GET', func, additional_methods
)
def post(self, route: str, additional_methods: List[str]=[]):
"""
Decorator for POST actions
:Parameters:
- `route`: the URL where the decorated function (action) can be
invoked.
.. Note:: This decorator has the same functionality as the @post
decorator in the pyterrier.http module; the main difference is that
this decorator is meant to be used when defining actions in the same
file where the instance of PyTerrier is created. If you intend to
define the actions in files in the `controllers` folder use
pyterrier.http.post instead.
.. Usage::
@app.post('/api/add')
def post(self):
...
"""
return lambda func: self._register_route(
route, 'POST', func, additional_methods
)
def put(self, route: str, additional_methods: List[str]=[]):
"""
Decorator for PUT actions.
:Parameters:
- `route`: the URL where the decorated function (action) can be
invoked.
.. Note:: This decorator has the same functionality as the @put
decorator in the pyterrier.http module; the main difference is that this
decorator is meant to be used when defining actions in the same file
where the instance of PyTerrier is created.
If you intend to define the actions in files in the `controllers`
folder use pyterrier.http.put instead.
.. Usage::
@app.put('/api/update')
def put(self):
...
"""
return lambda func: self._register_route(
route, 'PUT', func, additional_methods
)
def patch(self, route: str, additional_methods: List[str]=[]):
"""
Decorator for PATCH actions.
:Parameters:
- `route`: the URL where the decorated function (action) can be
invoked.
.. Note:: This decorator has the same functionality as the @patch
decorator in the pyterrier.http module; the main difference is that this
decorator is meant to be used when defining actions in the same file
where the instance of PyTerrier is created.
If you intend to define the actions in files in the `controllers`
folder use pyterrier.http.patch instead.
.. Usage::
@app.patch('/api/update')
def patch(self):
...
"""
return lambda func: self._register_route(
route, 'PATCH', func, additional_methods
)
def delete(self, route: str, additional_methods: List[str]=[]):
"""
Decorator for DELETE actions.
:Parameters:
- `route`: the URL where the decorated function (action) can be
invoked.
.. Note:: This decorator has the same functionality as the @delete
decorator in the pyterrier.http module; the main difference is that this
decorator is meant to be used when defining actions in the same file
where the instance of PyTerrier is created. If you intend to define
the actions in files in the `controllers` folder use
pyterrier.http.delete instead.
.. Usage::
@app.delete('/api/delete')
def delete(self):
...
"""
return lambda func: self._register_route(
route, 'DELETE', func, additional_methods
)
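# A minimal usage sketch mirroring the docstrings above (assumptions: templates/ and
# static/ folders exist next to this script; the handler body is left as a stub
# because the response contract depends on HttpRequestHandler).
if __name__ == '__main__':
    app = PyTerrier(hostname='localhost', port=8000)

    @app.get('/api/get')
    def get(self):
        ...

    app.run()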
|
py | b400d1ccaf2d8ab345961306b7dfbcaa60ae14ff | # Generated by Django 3.1.1 on 2020-09-19 13:49
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('author_name', models.CharField(max_length=50, verbose_name='author name')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
('title', models.CharField(help_text='Post title.', max_length=100, verbose_name='title')),
('link', models.URLField(help_text='Post link.', verbose_name='link')),
('upvotes_number', models.PositiveSmallIntegerField(help_text='The number of post upvotes.', verbose_name='upvotes number')),
],
options={
'verbose_name': 'post',
'verbose_name_plural': 'posts',
},
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('author_name', models.CharField(max_length=50, verbose_name='author name')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
('content', models.CharField(help_text='Comment content.', max_length=250, verbose_name='content')),
('lft', models.PositiveIntegerField(editable=False)),
('rght', models.PositiveIntegerField(editable=False)),
('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
('level', models.PositiveIntegerField(editable=False)),
('parent', mptt.fields.TreeForeignKey(blank=True, help_text='Parent comment.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='posts.comment', verbose_name="comment's parent")),
('post', models.ForeignKey(help_text='Related post.', on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='posts.post', verbose_name="comment's post")),
],
options={
'verbose_name': 'comment',
'verbose_name_plural': 'comments',
},
),
]
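# A minimal usage sketch (not part of the migration) of the Post/Comment models that
# these operations create; comments nest through django-mptt's TreeForeignKey parent.
#
#   post = Post.objects.create(author_name="alice", title="Example",
#                              link="https://example.com", upvotes_number=0)
#   root = Comment.objects.create(author_name="bob", content="First!",
#                                 post=post, parent=None)
#   reply = Comment.objects.create(author_name="carol", content="A nested reply",
#                                  post=post, parent=root)
#   reply.get_ancestors()  # MPTT adds tree helpers such as get_ancestors()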
|
py | b400d1d88855d5de4b3b7c928734e2ded1c962a6 | """
Starts a service to scan in intervals for new devices.
Will emit EVENT_PLATFORM_DISCOVERED whenever a new service has been discovered.
Knows which components handle certain types and makes sure they are
loaded before the EVENT_PLATFORM_DISCOVERED event is fired.
"""
import json
from datetime import timedelta
import logging
import os
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.core import callback
from homeassistant.const import EVENT_HOMEASSISTANT_START
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.helpers.discovery import async_load_platform, async_discover
import homeassistant.util.dt as dt_util
REQUIREMENTS = ['netdisco==2.0.0']
DOMAIN = 'discovery'
SCAN_INTERVAL = timedelta(seconds=300)
SERVICE_NETGEAR = 'netgear_router'
SERVICE_WEMO = 'belkin_wemo'
SERVICE_HASS_IOS_APP = 'hass_ios'
SERVICE_IKEA_TRADFRI = 'ikea_tradfri'
SERVICE_HASSIO = 'hassio'
SERVICE_AXIS = 'axis'
SERVICE_APPLE_TV = 'apple_tv'
SERVICE_WINK = 'wink'
SERVICE_XIAOMI_GW = 'xiaomi_gw'
SERVICE_TELLDUSLIVE = 'tellstick'
SERVICE_HUE = 'philips_hue'
SERVICE_KONNECTED = 'konnected'
SERVICE_DECONZ = 'deconz'
SERVICE_DAIKIN = 'daikin'
SERVICE_SABNZBD = 'sabnzbd'
SERVICE_SAMSUNG_PRINTER = 'samsung_printer'
SERVICE_HOMEKIT = 'homekit'
CONFIG_ENTRY_HANDLERS = {
SERVICE_DECONZ: 'deconz',
'google_cast': 'cast',
SERVICE_HUE: 'hue',
'sonos': 'sonos',
}
SERVICE_HANDLERS = {
SERVICE_HASS_IOS_APP: ('ios', None),
SERVICE_NETGEAR: ('device_tracker', None),
SERVICE_WEMO: ('wemo', None),
SERVICE_IKEA_TRADFRI: ('tradfri', None),
SERVICE_HASSIO: ('hassio', None),
SERVICE_AXIS: ('axis', None),
SERVICE_APPLE_TV: ('apple_tv', None),
SERVICE_WINK: ('wink', None),
SERVICE_XIAOMI_GW: ('xiaomi_aqara', None),
SERVICE_TELLDUSLIVE: ('tellduslive', None),
SERVICE_DAIKIN: ('daikin', None),
SERVICE_SABNZBD: ('sabnzbd', None),
SERVICE_SAMSUNG_PRINTER: ('sensor', 'syncthru'),
SERVICE_KONNECTED: ('konnected', None),
'panasonic_viera': ('media_player', 'panasonic_viera'),
'plex_mediaserver': ('media_player', 'plex'),
'roku': ('media_player', 'roku'),
'yamaha': ('media_player', 'yamaha'),
'logitech_mediaserver': ('media_player', 'squeezebox'),
'directv': ('media_player', 'directv'),
'denonavr': ('media_player', 'denonavr'),
'samsung_tv': ('media_player', 'samsungtv'),
'yeelight': ('light', 'yeelight'),
'frontier_silicon': ('media_player', 'frontier_silicon'),
'openhome': ('media_player', 'openhome'),
'harmony': ('remote', 'harmony'),
'bose_soundtouch': ('media_player', 'soundtouch'),
'bluesound': ('media_player', 'bluesound'),
'songpal': ('media_player', 'songpal'),
'kodi': ('media_player', 'kodi'),
'volumio': ('media_player', 'volumio'),
'nanoleaf_aurora': ('light', 'nanoleaf_aurora'),
'freebox': ('device_tracker', 'freebox'),
'dlna_dmr': ('media_player', 'dlna_dmr'),
}
OPTIONAL_SERVICE_HANDLERS = {
SERVICE_HOMEKIT: ('homekit_controller', None),
}
CONF_IGNORE = 'ignore'
CONF_ENABLE = 'enable'
CONFIG_SCHEMA = vol.Schema({
vol.Required(DOMAIN): vol.Schema({
vol.Optional(CONF_IGNORE, default=[]):
vol.All(cv.ensure_list, [
vol.In(list(CONFIG_ENTRY_HANDLERS) + list(SERVICE_HANDLERS))]),
vol.Optional(CONF_ENABLE, default=[]):
vol.All(cv.ensure_list, [vol.In(OPTIONAL_SERVICE_HANDLERS)])
}),
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass, config):
"""Start a discovery service."""
from netdisco.discovery import NetworkDiscovery
logger = logging.getLogger(__name__)
netdisco = NetworkDiscovery()
already_discovered = set()
# Disable zeroconf logging, it spams
logging.getLogger('zeroconf').setLevel(logging.CRITICAL)
# Platforms ignored by config
ignored_platforms = config[DOMAIN][CONF_IGNORE]
# Optional platforms enabled by config
enabled_platforms = config[DOMAIN][CONF_ENABLE]
async def new_service_found(service, info):
"""Handle a new service if one is found."""
if service in ignored_platforms:
logger.info("Ignoring service: %s %s", service, info)
return
discovery_hash = json.dumps([service, info], sort_keys=True)
if discovery_hash in already_discovered:
return
already_discovered.add(discovery_hash)
if service in CONFIG_ENTRY_HANDLERS:
await hass.config_entries.flow.async_init(
CONFIG_ENTRY_HANDLERS[service],
context={'source': config_entries.SOURCE_DISCOVERY},
data=info
)
return
comp_plat = SERVICE_HANDLERS.get(service)
if not comp_plat and service in enabled_platforms:
comp_plat = OPTIONAL_SERVICE_HANDLERS[service]
# We do not know how to handle this service.
if not comp_plat:
logger.info("Unknown service discovered: %s %s", service, info)
return
logger.info("Found new service: %s %s", service, info)
component, platform = comp_plat
if platform is None:
await async_discover(hass, service, info, component, config)
else:
await async_load_platform(
hass, component, platform, info, config)
async def scan_devices(now):
"""Scan for devices."""
results = await hass.async_add_job(_discover, netdisco)
for result in results:
hass.async_add_job(new_service_found(*result))
async_track_point_in_utc_time(hass, scan_devices,
dt_util.utcnow() + SCAN_INTERVAL)
@callback
def schedule_first(event):
"""Schedule the first discovery when Home Assistant starts up."""
async_track_point_in_utc_time(hass, scan_devices, dt_util.utcnow())
# discover local services
if 'HASSIO' in os.environ:
hass.async_add_job(new_service_found(SERVICE_HASSIO, {}))
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, schedule_first)
return True
def _discover(netdisco):
"""Discover devices."""
results = []
try:
netdisco.scan()
for disc in netdisco.discover():
for service in netdisco.get_info(disc):
results.append((disc, service))
finally:
netdisco.stop()
return results
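# A minimal sketch (illustrative, not part of Home Assistant itself) of a validated
# configuration for this component; the ignored/enabled services are placeholders
# chosen from the tables above.
_EXAMPLE_CONFIG = {
    DOMAIN: {
        CONF_IGNORE: [SERVICE_NETGEAR, 'roku'],
        CONF_ENABLE: [SERVICE_HOMEKIT],
    }
}
# CONFIG_SCHEMA(_EXAMPLE_CONFIG) returns the same structure with defaults applied.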
|
py | b400d3038f59a59c2c49370c644855a41706979c | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: auth.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='auth.proto',
package='authpb',
syntax='proto3',
serialized_options=_b('\n\024com.coreos.jetcd.apiP\001'),
serialized_pb=_b('\n\nauth.proto\x12\x06\x61uthpb\"5\n\x04User\x12\x0c\n\x04name\x18\x01 \x01(\x0c\x12\x10\n\x08password\x18\x02 \x01(\x0c\x12\r\n\x05roles\x18\x03 \x03(\t\"\x83\x01\n\nPermission\x12)\n\x08permType\x18\x01 \x01(\x0e\x32\x17.authpb.Permission.Type\x12\x0b\n\x03key\x18\x02 \x01(\x0c\x12\x11\n\trange_end\x18\x03 \x01(\x0c\"*\n\x04Type\x12\x08\n\x04READ\x10\x00\x12\t\n\x05WRITE\x10\x01\x12\r\n\tREADWRITE\x10\x02\"?\n\x04Role\x12\x0c\n\x04name\x18\x01 \x01(\x0c\x12)\n\rkeyPermission\x18\x02 \x03(\x0b\x32\x12.authpb.PermissionB\x18\n\x14\x63om.coreos.jetcd.apiP\x01\x62\x06proto3')
)
_PERMISSION_TYPE = _descriptor.EnumDescriptor(
name='Type',
full_name='authpb.Permission.Type',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='READ', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WRITE', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='READWRITE', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=167,
serialized_end=209,
)
_sym_db.RegisterEnumDescriptor(_PERMISSION_TYPE)
_USER = _descriptor.Descriptor(
name='User',
full_name='authpb.User',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='authpb.User.name', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='password', full_name='authpb.User.password', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='roles', full_name='authpb.User.roles', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=22,
serialized_end=75,
)
_PERMISSION = _descriptor.Descriptor(
name='Permission',
full_name='authpb.Permission',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='permType', full_name='authpb.Permission.permType', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='key', full_name='authpb.Permission.key', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='range_end', full_name='authpb.Permission.range_end', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_PERMISSION_TYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=78,
serialized_end=209,
)
_ROLE = _descriptor.Descriptor(
name='Role',
full_name='authpb.Role',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='authpb.Role.name', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='keyPermission', full_name='authpb.Role.keyPermission', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=211,
serialized_end=274,
)
_PERMISSION.fields_by_name['permType'].enum_type = _PERMISSION_TYPE
_PERMISSION_TYPE.containing_type = _PERMISSION
_ROLE.fields_by_name['keyPermission'].message_type = _PERMISSION
DESCRIPTOR.message_types_by_name['User'] = _USER
DESCRIPTOR.message_types_by_name['Permission'] = _PERMISSION
DESCRIPTOR.message_types_by_name['Role'] = _ROLE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
User = _reflection.GeneratedProtocolMessageType('User', (_message.Message,), dict(
DESCRIPTOR = _USER,
__module__ = 'auth_pb2'
# @@protoc_insertion_point(class_scope:authpb.User)
))
_sym_db.RegisterMessage(User)
Permission = _reflection.GeneratedProtocolMessageType('Permission', (_message.Message,), dict(
DESCRIPTOR = _PERMISSION,
__module__ = 'auth_pb2'
# @@protoc_insertion_point(class_scope:authpb.Permission)
))
_sym_db.RegisterMessage(Permission)
Role = _reflection.GeneratedProtocolMessageType('Role', (_message.Message,), dict(
DESCRIPTOR = _ROLE,
__module__ = 'auth_pb2'
# @@protoc_insertion_point(class_scope:authpb.Role)
))
_sym_db.RegisterMessage(Role)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
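# A minimal usage sketch appended for illustration only (this file is generated, so
# regeneration would drop it; all values below are placeholders).
def _example_roundtrip():
    perm = Permission(permType=Permission.WRITE, key=b'/registry/', range_end=b'/registry0')
    role = Role(name=b'admin', keyPermission=[perm])
    user = User(name=b'alice', password=b'secret', roles=['admin'])
    data = role.SerializeToString()
    return Role.FromString(data), user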
|
py | b400d467fa0d7659353282d55929b0830852488c | #!/usr/bin/env python3
import os, sys, getopt
from datetime import datetime, date, timedelta
MD = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
OUT_FOLDER = os.path.join(MD, "src/app/_data")
DATA_FOLDER = "/www/faculty/it/bwHPC/SCRIPTS/bwUniCluster2/sacct_logs"
PARTITIONS = ["dev_single", "single", "dev_multiple", "multiple", "fat", "dev_multiple_e",
"multiple_e", "dev_special", "special", "gpu_4", "dev_gpu_4", "gpu_8"]
def main(argv):
msg = f"Usage: {argv[0]} -y START_YEAR"
try:
opts, args = getopt.getopt(argv[1:], "y:")
except getopt.GetoptError:
print(msg)
sys.exit(1)
start_year = 2020
for opt, arg in opts:
if opt == "-y" and int(arg) > 2020:
start_year = int(arg)
END_YEAR = date.today().year
# write collected data into the data file
while start_year <= END_YEAR:
# sort text into a dict once
sorted_text = read_data(start_year)
data_file = os.path.join(OUT_FOLDER, f"{start_year}.ts")
with open(data_file, 'w') as f:
for queue in PARTITIONS:
f.write(ts_list(queue, sorted_text))
start_year += 1
# convert data line to ts list
def convert(line):
data = line.split()
if data[-1] == "Unknown" or data[6] == "Unknown" or data[7] == "Unknown":
return -1
start = datetime.fromisoformat(data[6])
submit = datetime.fromisoformat(data[-1])
wait = start - submit
# get number of procs
ntasks = data[-3]
# add the days into the hour count when the wait spans more than a day
if ('day' in str(wait)):
days = wait.days
time = str(wait).split(', ')[1].split(":")
else:
days = 0
time = str(wait).split(':')
hours = days * 24 + int(time[0])
minutes = int(time[1])
seconds = int(time[2])
total = seconds + 60 * minutes + 3600 * hours
if total == 0: return -1
return f'["", new Date({start.year}, {start.month - 1}, {start.day}), {total}, {ntasks}]'
def read_data(year):
if year == 2020:
start_month = 3
else:
start_month = 1
# set the end month up to which data will be read
CURRENT_YEAR = date.today().year
if year < CURRENT_YEAR:
end_month = 12
else:
yesterday = date.today() - timedelta(1)
end_month = yesterday.month
# init a dictionary to collect lines from all the data files
sorted_lines = {}
for queue in PARTITIONS:
sorted_lines[queue] = []
# read each file and sort lines into the dictionary
while start_month <= end_month:
if start_month < 10:
month = f"0{start_month}"
else: month = str(start_month)
file = os.path.join(DATA_FOLDER, f"{year}-{month}.log")
with open(file, 'r') as f:
for line in f:
queue = line.split()[-2]
if queue not in PARTITIONS: continue # ignore comments and redundancy in txt file
sorted_lines[queue].append(line)
start_month += 1
return sorted_lines
# return ts list for a specific queue
def ts_list(queue, sorted_text):
text = f"export const {queue}:any[] = ["
for line in sorted_text[queue]:
converted = convert(line)
if converted != -1:
text += converted + ", "
text += "]\n\n"
return text
if __name__ == '__main__':
main(sys.argv)
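# A minimal sketch (timestamps are illustrative) of the wait-time arithmetic that
# convert() performs by string-splitting the timedelta; the same total can be read
# directly from the timedelta:
#
#   from datetime import datetime
#   submit = datetime.fromisoformat("2021-03-01T10:00:00")
#   start = datetime.fromisoformat("2021-03-02T11:30:15")
#   wait = start - submit
#   total = wait.days * 86400 + wait.seconds   # 91815, matching the hours/minutes/seconds sum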
|
py | b400d4afafb8b7b299f8b4c84282e56f6c28478d | # Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from .utils import NamespacedClient, SKIP_IN_PATH, query_params, _make_path
class DanglingIndicesClient(NamespacedClient):
@query_params("accept_data_loss", "master_timeout", "timeout")
def delete_dangling_index(self, index_uuid, params=None, headers=None):
"""
Deletes the specified dangling index
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-gateway-dangling-indices.html>`_
:arg index_uuid: The UUID of the dangling index
:arg accept_data_loss: Must be set to true in order to delete
the dangling index
:arg master_timeout: Specify timeout for connection to master
:arg timeout: Explicit operation timeout
"""
if index_uuid in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'index_uuid'.")
return self.transport.perform_request(
"DELETE",
_make_path("_dangling", index_uuid),
params=params,
headers=headers,
)
@query_params("accept_data_loss", "master_timeout", "timeout")
def import_dangling_index(self, index_uuid, params=None, headers=None):
"""
Imports the specified dangling index
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-gateway-dangling-indices.html>`_
:arg index_uuid: The UUID of the dangling index
:arg accept_data_loss: Must be set to true in order to import
the dangling index
:arg master_timeout: Specify timeout for connection to master
:arg timeout: Explicit operation timeout
"""
if index_uuid in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'index_uuid'.")
return self.transport.perform_request(
"POST", _make_path("_dangling", index_uuid), params=params, headers=headers
)
@query_params()
def list_dangling_indices(self, params=None, headers=None):
"""
Returns all dangling indices.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-gateway-dangling-indices.html>`_
"""
return self.transport.perform_request(
"GET", "/_dangling", params=params, headers=headers
)
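# A minimal usage sketch (assumptions: a reachable cluster at the placeholder URL;
# the top-level Elasticsearch client from this package exposes this namespaced
# client as `.dangling_indices`, and the UUID below is a placeholder).
#
#   from elasticsearch import Elasticsearch
#   es = Elasticsearch("http://localhost:9200")
#   es.dangling_indices.list_dangling_indices()
#   es.dangling_indices.import_dangling_index("<index-uuid>", accept_data_loss=True)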
|
py | b400d4c5e55aab2e1f4b8d357fecf5871cbafec2 | import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, constant_init, kaiming_init
from mmcv.ops import ModulatedDeformConv2d, modulated_deform_conv2d
from mmcv.runner import load_checkpoint
from torch.nn.modules.utils import _pair
from torch.nn.parameter import Parameter
import math
import torch.nn.functional as F
from mmedit.models.common import (PixelShufflePack, ResidualBlockNoBN,
make_layer)
from mmedit.models.registry import BACKBONES
from mmedit.models.restorers.encoder_decoder import EncoderDecoder
from mmedit.utils import get_root_logger
from mmedit.models.common.sr_backbone_utils import default_init_weights, GaussModulation
class CRANV2(nn.Conv2d):
def __init__(self, in_channels=64, mid_channels=64, kernel_size=3, stride=1, padding=1, dilation=1, groups=1, bias=True):
super(CRANV2, self).__init__(in_channels, mid_channels, kernel_size, stride, padding, dilation, groups, bias)
self.stride = stride
self.padding= padding
self.dilation = dilation
self.groups = groups
self.mid_channel = mid_channels
self.kernel_size = kernel_size
# weight & bias for content-gated-convolution
self.weight_conv = Parameter(torch.zeros(mid_channels, in_channels, kernel_size, kernel_size), requires_grad=True)
self.bias_conv = Parameter(torch.zeros(mid_channels), requires_grad=True)
# init weight_conv layer
nn.init.kaiming_normal_(self.weight_conv)
# target spatial size of the pooling layer
self.avg_pool = nn.AdaptiveAvgPool2d((kernel_size, kernel_size))
# the dimension of latent representation
self.num_latent = int((kernel_size * kernel_size) / 2 + 1)
# the context encoding module
self.context_encoding = nn.Linear(kernel_size*kernel_size, self.num_latent, False)
self.context_encoding_bn = nn.BatchNorm1d(in_channels)
# relu function
self.relu = nn.ReLU(inplace=True)
# the number of groups in the channel interaction module
if in_channels // 16: self.g = 16
else: self.g = in_channels
# the channel interacting module
self.channel_interact = nn.Linear(self.g, mid_channels // (in_channels // self.g), bias=False)
self.channel_interact_bn = nn.BatchNorm1d(mid_channels)
self.channel_interact_bn2 = nn.BatchNorm1d(in_channels)
# the gate decoding module (spatial interaction)
self.gate_decode = nn.Linear(self.num_latent, kernel_size * kernel_size, False)
self.gate_decode2 = nn.Linear(self.num_latent, kernel_size * kernel_size, False)
# used to prepare the input feature map to patches
self.unfold = nn.Unfold(kernel_size, dilation, padding, stride)
# sigmoid function
self.sigmoid = nn.Sigmoid()
def forward(self, x):
b, c, h, w = x.size()
weight = self.weight_conv
# allocate global information and context-encoding module
out = self.context_encoding(self.avg_pool(x).view(b, c, -1))
# use different bn for following two branches
context_encoding2 = out.clone()
out = self.relu(self.context_encoding_bn(out))
# gate decoding branch 1 (spatial interaction)
out = self.gate_decode(out) # out: batch x n_feat x 9 (5 --> 9 = 3x3)
# channel interacting module
oc = self.channel_interact(self.relu(self.channel_interact_bn2(context_encoding2).view(b, c//self.g, self.g, -1).transpose(2,3))).transpose(2,3).contiguous()
oc = self.relu(self.channel_interact_bn(oc.view(b, self.mid_channel, -1))) # oc: batch x n_feat x 5 (after grouped linear layer)
# gate decoding branch 2 (spatial interaction)
oc = self.gate_decode2(oc)
# produce gate (equation (4) in the CRAN paper)
out = self.sigmoid(out.view(b, 1, c, self.kernel_size, self.kernel_size)
+ oc.view(b, self.mid_channel, 1, self.kernel_size, self.kernel_size))
# unfolding input feature map to patches
x_unfold = self.unfold(x)
b, _, l = x_unfold.size()
# gating
out = (out * weight.unsqueeze(0)).view(b, self.mid_channel, -1)
return torch.matmul(out, x_unfold).view(-1, c, h, w)
class CRANResidualBlockNoBN(nn.Module):
def __init__(self, mid_channels=64):
super().__init__()
self.residual_block = ResidualBlockNoBN(mid_channels=mid_channels)
self.cran_block = CRANV2(mid_channels=mid_channels)
self.fusion = nn.Conv2d(mid_channels * 2, mid_channels, 3, 1, 1)
def forward(self, feat):
cran_feat = self.cran_block(feat)
feat = torch.cat([cran_feat, feat], dim=1)
feat = self.fusion(feat)
feat_prop = self.residual_block(feat)
return feat_prop
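# A minimal shape-check sketch (assumption: 64-channel feature maps, matching the
# defaults above); it relies only on the imports already at the top of this file.
def _example_forward():
    block = CRANResidualBlockNoBN(mid_channels=64)
    feat = torch.randn(2, 64, 32, 32)
    out = block(feat)
    return out.shape  # expected: torch.Size([2, 64, 32, 32])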
|
py | b400d5183ddc49c01c1c76607cbf41f59a7abfab | # 05-upload-data.py
from azureml.core import Workspace
import argparse
ws = Workspace.from_config()
datastore = ws.get_default_datastore()
datastore.upload(src_dir='./data',
target_path='datasets/cifar10',
overwrite=True)
"""if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--src_dir',
type=str,
default=None,
help='Path to the local data directory'
)
parser.add_argument(
'--target_path',
type=str,
default=None,
help='Target path name'
)
args = parser.parse_args()
datastore.upload(src_dir=args.src_dir,
target_path=args.target_path,
overwrite=True)""" |
py | b400d531c42ffa9b7bffaac3eccb0fa27b4ffb1d | from typing import List, Dict, Optional, Tuple
import torch
import torch.optim._functional as F
from torch import Tensor
# Define a TorchScript compatible Functional Adamax Optimizer
# where we use these optimizer in a functional way.
# Instead of using the `param.grad` when updating parameters,
# we explicitly allow the distributed optimizer pass gradients to
# the `step` function. In this way, we could separate the gradients
# and parameters and allow multithreaded trainer to update the
# parameters without data traces on accumulating to the same .grad.
# NOTE: This should be only used by distributed optimizer internals
# and not meant to expose to the user.
@torch.jit.script
class _FunctionalAdamax(object):
def __init__(
self,
params: List[Tensor],
lr: float = 1e-3,
betas: Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-8,
weight_decay: float = 0.0,
_allow_empty_param_list: bool = False,
):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
self.defaults = {
"lr": lr,
"eps": eps,
"beta1": betas[0],
"beta2": betas[1],
"weight_decay": weight_decay,
}
self.state = torch.jit.annotate(Dict[torch.Tensor, Dict[str, torch.Tensor]], {})
if len(params) == 0 and not _allow_empty_param_list:
raise ValueError("optimizer got an empty parameter list")
# NOTE: we only have one param_group and don't allow user to add additional
# param group as it's not a common use case.
self.param_group = {"params": params}
def step(self, gradients: List[Optional[Tensor]]):
params = self.param_group['params']
params_with_grad = []
grads = []
exp_avgs = []
exp_infs = []
state_steps: List[int] = []
if len(params) != len(gradients):
raise ValueError(
"the gradients passed in does not equal to the size of the parameters!"
+ f"Params length: {len(params)}. "
+ f"Gradients length: {len(gradients)}"
)
for param, gradient in zip(self.param_group['params'], gradients):
if gradient is not None:
params_with_grad.append(param)
grads.append(gradient)
# Lazy state initialization
if param not in self.state:
self.state[param] = {}
state = self.state[param]
state['step'] = torch.tensor(0.0)
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format)
# Exponential moving average of squared gradient values
state['exp_inf'] = torch.zeros_like(param, memory_format=torch.preserve_format)
state = self.state[param]
exp_avgs.append(state['exp_avg'])
exp_infs.append(state['exp_inf'])
# update the steps for each param group update
state['step'] += 1
# record the step after step update
state_steps.append(state['step'].item())
with torch.no_grad():
F.adamax(params_with_grad,
grads,
exp_avgs,
exp_infs,
state_steps,
eps=self.defaults['eps'],
beta1=self.defaults['beta1'],
beta2=self.defaults['beta2'],
lr=self.defaults['lr'],
weight_decay=self.defaults['weight_decay'])
|
py | b400d578e7b5c242cce6af2520845f07b857fa1d | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: lipeijie
import os
import shutil
from easyai.helper.dirProcess import DirProcess
class CopyImage():
def __init__(self):
self.dir_process = DirProcess()
def copy(self, trainPath, image_save_dir):
image_list = self.get_image_list(trainPath)
if os.path.exists(image_save_dir):
os.system('rm -rf ' + image_save_dir)
os.makedirs(image_save_dir, exist_ok=True)
if len(image_list) > 0:
image_path = image_list[0]
path, image_name = os.path.split(image_path)
save_path = os.path.join(image_save_dir, image_name)
shutil.copy(image_path, save_path)
else:
print("empty images")
def get_image_list(self, trainPath):
result = []
path, _ = os.path.split(trainPath)
imagesDir = os.path.join(path, "../JPEGImages")
for fileNameAndPost in self.dir_process.getFileData(trainPath):
imagePath = os.path.join(imagesDir, fileNameAndPost)
#print(imagePath)
if os.path.exists(imagePath):
result.append(imagePath)
return result
|
py | b400d64359d8771afbbd6f98172ad95314bfbf5e | import datetime
import toga
import toga_dummy
from toga_dummy.utils import TestCase
class TimePickerTests(TestCase):
def setUp(self):
super().setUp()
self.time_picker = toga.TimePicker(factory=toga_dummy.factory)
def test_widget_created(self):
self.assertEqual(self.time_picker._impl.interface, self.time_picker)
self.assertActionPerformed(self.time_picker, 'create TimePicker')
def test_getting_value_invokes_impl_method(self):
# Exercise the value attribute getter for testing only. Actual value not needed.
self.time_picker.value
self.assertValueGet(self.time_picker, 'value')
def test_set_value_with_None(self):
self.time_picker.value = None
none_default = datetime.datetime.today().time().replace(microsecond=0)
self.assertValueSet(self.time_picker, 'value', none_default.strftime('%H:%M:%S'))
def test_set_value_with_an_hour_ago(self):
hour_ago = datetime.datetime.today() - datetime.timedelta(hours=1)
self.time_picker.value = hour_ago.time()
self.assertValueSet(self.time_picker, 'value', hour_ago.strftime('%H:%M:%S.%f'))
def test_setting_value_invokes_impl_method(self):
new_value = 'New Value'
self.time_picker.value = new_value
self.assertValueSet(self.time_picker, 'value', new_value)
def test_min_max_time(self):
self.assertEqual(self.time_picker.min_time, None)
self.assertEqual(self.time_picker.max_time, None)
hour_ago = datetime.datetime.today() - datetime.timedelta(hours=1)
self.time_picker.min_time = hour_ago.time()
self.time_picker.max_time = hour_ago.time()
self.assertEqual(self.time_picker.min_time, hour_ago.strftime('%H:%M:%S.%f'))
self.assertEqual(self.time_picker.max_time, hour_ago.strftime('%H:%M:%S.%f'))
def test_on_change_callback_set(self):
def dummy_function():
pass
self.time_picker.on_change = dummy_function
self.assertIsNotNone(self.time_picker.on_change)
|
py | b400d73c3bb592a563baf9fcd6e7661065b41e19 | """
The 'cheml.wrappers.BANK' module includes ,
last modified date: Aug 29, 2017
"""
# from test import test
__all__ = []
|
py | b400d827c09499f82461f0db5571b6e88366ca00 | from typing import Optional
import datetime
import logging
import pathlib
import cv2
import numpy as np
import yacs.config
import os
import argparse
from tqdm import tqdm
from ptgaze import (Face, FacePartsName, GazeEstimationMethod, GazeEstimator,
Visualizer)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class Demo:
QUIT_KEYS = {27, ord('q')}
def __init__(self, config: yacs.config.CfgNode):
self.config = config
self.gaze_estimator = GazeEstimator(config)
self.visualizer = Visualizer(self.gaze_estimator.camera)
self.cap = self._create_capture()
self.output_dir = self._create_output_dir()
self.writer = self._create_video_writer()
self.stop = False
self.show_bbox = self.config.demo.show_bbox
self.show_head_pose = self.config.demo.show_head_pose
self.show_landmarks = self.config.demo.show_landmarks
self.show_normalized_image = self.config.demo.show_normalized_image
self.show_template_model = self.config.demo.show_template_model
def run(self) -> None:
if self.config.demo.use_camera or self.config.demo.video_path:
self._run_on_video()
elif self.config.demo.image_path:
self._run_on_image()
else:
raise ValueError
def _run_on_image(self):
image_path = self.config.demo.image_path
# print(str(image_path), flush=True)
listdir = os.listdir(image_path)
for img_name in tqdm(listdir):
image_path_name = os.path.join(image_path, img_name)
image = cv2.imread(image_path_name)
self._process_image(image)
if self.config.demo.display_on_screen:
while True:
key_pressed = self._wait_key()
if self.stop:
break
if key_pressed:
self._process_image(image)
cv2.imshow('image', self.visualizer.image)
if self.config.demo.output_dir:
output_path = os.path.join(self.config.demo.output_dir, img_name)
cv2.imwrite(output_path, self.visualizer.image)
def _run_on_video(self) -> None:
while True:
if self.config.demo.display_on_screen:
self._wait_key()
if self.stop:
break
ok, frame = self.cap.read()
if not ok:
break
self._process_image(frame)
if self.config.demo.display_on_screen:
cv2.imshow('frame', self.visualizer.image)
self.cap.release()
if self.writer:
self.writer.release()
def _process_image(self, image) -> None:
undistorted = cv2.undistort(
image, self.gaze_estimator.camera.camera_matrix,
self.gaze_estimator.camera.dist_coefficients)
self.visualizer.set_image(image.copy())
faces = self.gaze_estimator.detect_faces(undistorted)
for face in faces:
self.gaze_estimator.estimate_gaze(undistorted, face)
self._draw_face_bbox(face)
self._draw_head_pose(face)
self._draw_landmarks(face)
self._draw_face_template_model(face)
self._draw_gaze_vector(face)
self._display_normalized_image(face)
if self.config.demo.use_camera:
self.visualizer.image = self.visualizer.image[:, ::-1]
if self.writer:
self.writer.write(self.visualizer.image)
def _create_capture(self) -> Optional[cv2.VideoCapture]:
if self.config.demo.image_path:
return None
if self.config.demo.use_camera:
cap = cv2.VideoCapture(0)
elif self.config.demo.video_path:
cap = cv2.VideoCapture(self.config.demo.video_path)
else:
raise ValueError
cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.gaze_estimator.camera.width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.gaze_estimator.camera.height)
return cap
def _create_output_dir(self) -> Optional[pathlib.Path]:
if not self.config.demo.output_dir:
return
output_dir = pathlib.Path(self.config.demo.output_dir)
output_dir.mkdir(exist_ok=True, parents=True)
return output_dir
@staticmethod
def _create_timestamp() -> str:
dt = datetime.datetime.now()
return dt.strftime('%Y%m%d_%H%M%S')
def _create_video_writer(self) -> Optional[cv2.VideoWriter]:
if self.config.demo.image_path:
return None
if not self.output_dir:
return None
ext = self.config.demo.output_file_extension
if ext == 'mp4':
fourcc = cv2.VideoWriter_fourcc(*'H264')
elif ext == 'avi':
fourcc = cv2.VideoWriter_fourcc(*'PIM1')
else:
raise ValueError
if self.config.demo.use_camera:
output_name = f'{self._create_timestamp()}.{ext}'
elif self.config.demo.video_path:
name = pathlib.Path(self.config.demo.video_path).stem
output_name = f'{name}.{ext}'
else:
raise ValueError
output_path = self.output_dir / output_name
writer = cv2.VideoWriter(output_path.as_posix(), fourcc, 30,
(self.gaze_estimator.camera.width,
self.gaze_estimator.camera.height))
if writer is None:
raise RuntimeError
return writer
def _wait_key(self) -> bool:
key = cv2.waitKey(self.config.demo.wait_time) & 0xff
if key in self.QUIT_KEYS:
self.stop = True
elif key == ord('b'):
self.show_bbox = not self.show_bbox
elif key == ord('l'):
self.show_landmarks = not self.show_landmarks
elif key == ord('h'):
self.show_head_pose = not self.show_head_pose
elif key == ord('n'):
self.show_normalized_image = not self.show_normalized_image
elif key == ord('t'):
self.show_template_model = not self.show_template_model
else:
return False
return True
def _draw_face_bbox(self, face: Face) -> None:
if not self.show_bbox:
return
self.visualizer.draw_bbox(face.bbox)
def _draw_head_pose(self, face: Face) -> None:
if not self.show_head_pose:
return
# Draw the axes of the model coordinate system
length = self.config.demo.head_pose_axis_length
self.visualizer.draw_model_axes(face, length, lw=2)
euler_angles = face.head_pose_rot.as_euler('XYZ', degrees=True)
pitch, yaw, roll = face.change_coordinate_system(euler_angles)
logger.info(f'[head] pitch: {pitch:.2f}, yaw: {yaw:.2f}, '
f'roll: {roll:.2f}, distance: {face.distance:.2f}')
def _draw_landmarks(self, face: Face) -> None:
if not self.show_landmarks:
return
self.visualizer.draw_points(face.landmarks,
color=(0, 255, 255),
size=1)
def _draw_face_template_model(self, face: Face) -> None:
if not self.show_template_model:
return
self.visualizer.draw_3d_points(face.model3d,
color=(255, 0, 525),
size=1)
def _display_normalized_image(self, face: Face) -> None:
if not self.config.demo.display_on_screen:
return
if not self.show_normalized_image:
return
if self.config.mode == GazeEstimationMethod.MPIIGaze.name:
reye = face.reye.normalized_image
leye = face.leye.normalized_image
normalized = np.hstack([reye, leye])
elif self.config.mode == GazeEstimationMethod.MPIIFaceGaze.name:
normalized = face.normalized_image
else:
raise ValueError
if self.config.demo.use_camera:
normalized = normalized[:, ::-1]
cv2.imshow('normalized', normalized)
def _draw_gaze_vector(self, face: Face) -> None:
length = self.config.demo.gaze_visualization_length
if self.config.mode == GazeEstimationMethod.MPIIGaze.name:
for key in [FacePartsName.REYE, FacePartsName.LEYE]:
eye = getattr(face, key.name.lower())
self.visualizer.draw_3d_line(
eye.center, eye.center + length * eye.gaze_vector)
pitch, yaw = np.rad2deg(eye.vector_to_angle(eye.gaze_vector))
logger.info(
f'[{key.name.lower()}] pitch: {pitch:.2f}, yaw: {yaw:.2f}')
elif self.config.mode == GazeEstimationMethod.MPIIFaceGaze.name:
self.visualizer.draw_3d_line(
face.center, face.center + length * face.gaze_vector)
pitch, yaw = np.rad2deg(face.vector_to_angle(face.gaze_vector))
logger.info(f'[face] pitch: {pitch:.2f}, yaw: {yaw:.2f}')
else:
raise ValueError
|
py | b400da741c1ab47e778000ab302457cc4b3d8213 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2018-02-26 13:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('yaksh', '0009_auto_20180113_1124'),
]
operations = [
migrations.AddField(
model_name='learningmodule',
name='active',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='lesson',
name='active',
field=models.BooleanField(default=True),
),
migrations.AlterField(
model_name='quiz',
name='time_between_attempts',
field=models.FloatField(verbose_name='Time Between Quiz Attempts in hours'),
),
migrations.AlterUniqueTogether(
name='answerpaper',
unique_together=set([('user', 'question_paper', 'attempt_number', 'course')]),
),
]
|
py | b400dace4638e30b39bcae9d3e12de4226514cc6 | #!/usr/bin/python
#-*- coding: utf-8 -*-
import sys
import os
import re
# Get the file in the directory that matches the given pattern
def get_file_path(dirPath, match):
result_file_path = ''
regexObj = re.compile(r'{}'.format(match))
if os.path.exists(dirPath):
pass
else:
return None
if os.path.isdir(dirPath):
child_file_list = os.listdir(dirPath)
for file in child_file_list:
file_path = dirPath + '/' + file
if not os.path.isdir(file_path):
if os.path.exists(file_path) and regexObj.search(file_path):
result_file_path = file_path
return result_file_path
# Check whether a string is a number
def is_number(str):
try:
# float() would accept 'NaN', so reject it explicitly
if str=='NaN':
return False
float(str)
return True
except ValueError:
return False
def check_podspec():
current_path = os.getcwd()
podspec_file_path = get_file_path(current_path, '\.podspec$')
if podspec_file_path:
pass
else:
raise Exception('No .podspec file found in the current git directory; check whether the configuration is correct')
error_note_lines = []
missing_note_lines = []
with open(podspec_file_path, 'r') as podspec_file:
line = podspec_file.readline()
line_index = 1
while line:
# rule = re.compile(r'(\s*s.dependency\s*)([^#].*,.*)(#\s*)(?P<ssss>(?:[^@ ].{1,}))(?P<ttt>(?:@[^ ]).{1,})((?: ){1,}\d$)')
# match = re.search(rule, line)
# if match:
# print(line.strip(), '=====', line_index, '+++', match.groupdict()['ssss'], match.groupdict()['ttt'])
# pass
regexObj = re.compile(r'(?P<prefix>\s*s.dependency\s*)(?P<pod>[^#].*,.*)(?P<note>#.*)')
matchObj = re.search(regexObj, line)
if matchObj:
note = matchObj.groupdict()['note']
regexObj_note = re.compile(r'(^#)(?P<des>[^@]*)(?P<manager>(?:@[^ ]).{1,})(?P<priority>\s.{0,}\d$)')
matchObj_note = re.search(regexObj_note, note.strip())
if matchObj_note:
# print('***', matchObj_note.groupdict()['des'], matchObj_note.groupdict()['manager'], matchObj_note.groupdict()['priority'])
des = matchObj_note.groupdict()['des'].strip()
manager = matchObj_note.groupdict()['manager']
priority = matchObj_note.groupdict()['priority']
if des and is_number(priority):
pass
else:
error_note_lines.append({'line': line_index, 'content': line.strip(), 'reason': 'reason: version digit is incorrect' if des else 'reason: dependency description is missing'})
pass
else:
error_note_lines.append({'line': line_index, 'content': line.strip(), 'reason': 'reason: missing @owner information'})
else:
regexObj_other = re.compile(r'(?P<prefix>^\s*s.dependency\s*)(.*)')
if re.search(regexObj_other, line):
missing_note_lines.append({'line': line_index, 'content': line.strip()})
pass
line = podspec_file.readline()
line_index += 1
pass
# Get the file name
file_name = os.path.split(podspec_file_path)[1]
out_msg = []
if error_note_lines.__len__():
for line_info in error_note_lines:
out_info = "\033[1;31m File '{}', line {}, in '{} => 依赖注释不正确 {} \033[0m".format(file_name, line_info['line'], line_info['content'], line_info['reason'])
out_msg.append(out_info)
if missing_note_lines.__len__():
for line_info in missing_note_lines:
out_info = "\033[1;33m File '{}', line {}, in '{}' => 缺失依赖注释 \033[0m".format(file_name, line_info['line'], line_info['content'])
out_msg.append(out_info)
if out_msg.__len__():
for out_info in out_msg:
print(out_info)
print("\033[1;32m 注释Demo:\n s.dependency 'SupermarketPlatform', '>=7.38.0.2' # 闪购平台库 @李道建 @冯阳 3 \033[0m")
print("\033[1;32m # 依赖库描述 + @负责人员 + 数字(1:大版本 2:小版本两位 3:小版本三位) \033[0m")
raise Exception('请正确的配置注释')
return 0
if __name__ == "__main__":
sys.exit(check_podspec())
|
py | b400dbb80815380447b2cf9e6ad3b5519ec306c6 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2019/1/14 5:41 PM
# @Author : w8ay
# @File : engine.py
# Dispatching and scheduling engine
import _thread
import os
import random
import socket
import sys
import threading
import time
from concurrent import futures
from queue import Queue
from urllib.parse import urlparse
import requests
from config import NUM_CACHE_DOMAIN, NUM_CACHE_IP, MASSCAN_DEFAULT_PORT, MASSCAN_FULL_SCAN, IS_START_PLUGINS
from lib.common import is_ip_address_format, is_url_format
from lib.data import logger, PATHS, collector
from lib.loader import load_remote_poc, load_string_to_module
from lib.redis import task_update
from plugins import webeye, webtitle, crossdomain, gitleak, iis_parse, phpinfo, svnleak, tomcat_leak, whatcms, \
ip_location, wappalyzer, directory_browse, password_found
from plugins.masscan import masscan
from plugins.nmap import nmapscan
class Schedular:
def __init__(self, threadnum=1):
self.queue = Queue()
self.ip_queue = Queue()
self.threadNum = threadnum
self.lock = threading.Lock()
        self.cache_ips = []  # IP buffer pool
        self.cache_domains = []  # domain buffer pool
logger.info("Start number of threading {}".format(self.threadNum))
def put_target(self, target):
        # Decide whether the target is an IP or a domain and tag it accordingly
if is_ip_address_format(target):
serviceType = "ip"
elif is_url_format(target):
serviceType = "domain"
target = target.rstrip('/')
else:
serviceType = "other"
tmp = {
"target": target,
"serviceType": serviceType
}
if serviceType == "ip":
self.ip_queue.put(tmp)
else:
self.queue.put(tmp)
task_update("tasks", self.queue.qsize() + self.ip_queue.qsize())
def receive_ip(self):
while 1:
struct = self.ip_queue.get()
serviceType = struct.get("serviceType", 'other')
task_update("tasks", self.queue.qsize() + self.ip_queue.qsize())
if serviceType == "ip":
flag = False
self.lock.acquire()
self.cache_ips.append(struct)
num = len(self.cache_ips)
if num >= NUM_CACHE_IP:
flag = True
serviceTypes = self.cache_ips
self.cache_ips = []
self.lock.release()
if not flag:
self.ip_queue.task_done()
continue
task_update("running", 1)
try:
self.hand_ip(serviceTypes)
except Exception as e:
logger.error("hand ip error:{}".format(repr(e)))
logger.error(repr(sys.exc_info()))
task_update("running", -1)
self.ip_queue.task_done()
task_update("tasks", self.queue.qsize() + self.ip_queue.qsize())
def receive(self):
while 1:
struct = self.queue.get()
task_update("tasks", self.queue.qsize() + self.ip_queue.qsize())
serviceType = struct.get("serviceType", 'other')
if serviceType == "other":
msg = "not matches target:{}".format(repr(struct))
logger.error(msg)
self.queue.task_done()
continue
elif serviceType == "domain":
flag = False
self.lock.acquire()
self.cache_domains.append(struct)
num = len(self.cache_domains)
if num >= NUM_CACHE_DOMAIN:
flag = True
serviceTypes = self.cache_domains
self.cache_domains = []
self.lock.release()
if not flag:
self.queue.task_done()
continue
                # Start scanning the buffered domains
for serviceType in serviceTypes:
task_update("running", 1)
try:
self.hand_domain(serviceType)
except Exception as e:
logger.error("hand domain error:{}".format(repr(e)))
logger.error(repr(sys.exc_info()))
task_update("running", -1)
self.queue.task_done()
task_update("tasks", self.queue.qsize() + self.ip_queue.qsize())
def start(self):
for i in range(self.threadNum - 1):
_thread.start_new_thread(self.receive, ())
_thread.start_new_thread(self.receive_ip, ())
def nmap_result_handle(self, result_nmap: dict, host):
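        # Convert nmap's per-port results into {host: [port info, ...]} and push any
        # discovered HTTP/HTTPS services back onto the queue for web scanning.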
if result_nmap is None:
return None
result2 = {}
for port, portInfo in result_nmap.items():
if host not in result2:
result2[host] = []
if portInfo["state"] != "open":
continue
name = portInfo.get("name", "")
            # handle nmap quirk: some fields may be missing
product = portInfo.get("product", "")
version = portInfo.get("version", "")
extrainfo = portInfo.get("extrainfo", "")
if "http" in name and "https" not in name:
if port == 443:
_url = "https://{0}:{1}".format(host, port)
else:
_url = "http://{0}:{1}".format(host, port)
self.put_target(_url)
elif "https" in name:
_url = "https://{0}:{1}".format(host, port)
self.put_target(_url)
result2[host].append(
{"port": port, "name": name, "product": product, "version": version, "extrainfo": extrainfo})
return result2
def hand_ip(self, serviceTypes, option='masscan'):
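        # Port-scan a batch of IP targets: masscan for fast port discovery, then nmap on
        # the open ports for service identification; results are geo-located and collected.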
ip_list = []
for item in serviceTypes:
ip_list.append(item["target"])
ports = MASSCAN_DEFAULT_PORT
result2 = {}
if option == 'masscan':
if MASSCAN_FULL_SCAN:
ports = "1-65535"
target = os.path.join(PATHS.OUTPUT_PATH, "target_{0}.log".format(time.time()))
with open(target, "w+") as fp:
fp.write('\n'.join(ip_list))
logger.debug("ip:" + repr(ip_list))
try:
result = masscan(target, ports)
except Exception as e:
logger.error("masscan error msg:{}".format(repr(e)))
result = None
if result is None:
return None
# format:{'115.159.39.75': ['80'], '115.159.39.215': ['80', '3306'],}
for host, ports in result.items():
ports = list(ports)
if host not in result2:
result2[host] = []
task_update("running", 1)
try:
result_nmap = nmapscan(host, ports)
except:
result_nmap = None
task_update("running", -1)
if result_nmap is None:
for tmp_port in ports:
result2[host].append({"port": tmp_port})
continue
tmp_r = self.nmap_result_handle(result_nmap, host=host)
result2.update(tmp_r)
elif option == "nmap":
logger.debug("ip:" + repr(ip_list))
for host in ip_list:
result_nmap = nmapscan(host, ports.split(","))
tmp_r = self.nmap_result_handle(result_nmap, host=host)
if tmp_r:
result2.update(tmp_r)
data = {}
for ip in result2.keys():
# result2[ip]
if ip not in data:
data[ip] = {}
d = ip_location.poc(ip)
if d:
data[ip]["location"] = d
data[ip]["infos"] = result2[ip]
collector.add_ips(data)
for ip in result2.keys():
collector.send_ok_ip(ip)
def hand_domain(self, serviceType):
target = serviceType["target"]
logger.info(target)
        # Record this target
collector.add_domain(target)
        # Send the initial HTTP request
try:
r = requests.get(target, timeout=30, verify=False, allow_redirects=False)
collector.add_domain_info(target,
{"headers": r.headers, "body": r.text, "status_code": r.status_code})
except Exception as e:
logger.error("request url error:" + str(e))
collector.del_domain(target)
return
logger.debug("target:{} over,start to scan".format(target))
# Get hostname
hostname = urlparse(target).netloc.split(":")[0]
if not is_ip_address_format(hostname):
try:
_ip = socket.gethostbyname(hostname)
collector.add_domain_info(target, {"ip": _ip})
except:
pass
else:
collector.add_domain_info(target, {"ip": hostname})
work_list = [webeye.poc, webtitle.poc, wappalyzer.poc, password_found.poc]
if IS_START_PLUGINS:
work_list.append(crossdomain.poc)
work_list.append(directory_browse.poc)
work_list.append(gitleak.poc)
work_list.append(iis_parse.poc)
work_list.append(phpinfo.poc)
work_list.append(svnleak.poc)
work_list.append(tomcat_leak.poc)
work_list.append(whatcms.poc)
        # work_list.append(bakfile.poc)  # backup-file scan module removed: too time-consuming
# th = []
# try:
# for func in work_list:
# i = threading.Thread(target=func, args=(target,))
# i.start()
# th.append(i)
# for thi in th:
# thi.join()
# except Exception as e:
# logger.error("domain plugin threading error {}:{}".format(repr(Exception), str(e)))
for func in work_list:
try:
func(target)
except Exception as e:
logger.error("domain plugin threading error {}:{}".format(repr(Exception), str(e)))
logger.debug("target:{} End of scan".format(target))
infos = collector.get_domain(target)
_pocs = []
temp = {}
if IS_START_PLUGINS and "CMS" in infos:
if infos.get("app"):
temp["app"] = []
temp["app"].append(infos["CMS"])
else:
temp["app"] = [infos["CMS"]]
# update domain app
collector.add_domain_info(target, temp)
if temp.get("app"):
keywords = temp["app"]
            # Fetch remote PoC plugins
pocs = load_remote_poc()
for poc in pocs:
for keyword in keywords:
if poc["name"] == keyword:
webfile = poc["webfile"]
logger.debug("load {0} poc:{1} poc_time:{2}".format(poc["type"], webfile, poc["time"]))
                        # Load the plugin code
code = requests.get(webfile).text
obj = load_string_to_module(code, webfile)
_pocs.append(obj)
        # Run the plugins concurrently
if _pocs:
executor = futures.ThreadPoolExecutor(len(_pocs))
fs = []
for f in _pocs:
taks = executor.submit(f.poc, target)
fs.append(taks)
for f in futures.as_completed(fs):
try:
res = f.result()
except Exception as e:
res = None
logger.error("load poc error:{} error:{}".format(target, str(e)))
if res:
name = res.get("name") or "scan_" + str(time.time())
collector.add_domain_bug(target, {name: res})
collector.send_ok(target)
def run(self):
while 1:
            # Process any remaining unhandled domains
if self.cache_domains:
self.lock.acquire()
service_types = self.cache_domains
self.cache_domains = []
self.lock.release()
                # Scan the remaining domains
for serviceType in service_types:
task_update("running", 1)
try:
self.hand_domain(serviceType)
except Exception as e:
logger.error(repr(sys.exc_info()))
task_update("running", -1)
            # Process any remaining unhandled IPs
if self.cache_ips:
self.lock.acquire()
service_types = self.cache_ips
self.cache_ips = []
self.lock.release()
task_update("running", 1)
try:
self.hand_ip(service_types)
except Exception as e:
logger.error(repr(sys.exc_info()))
task_update("running", -1)
            # Final submit of collected results
collector.submit()
task_update("tasks", self.queue.qsize() + self.ip_queue.qsize())
time.sleep(random.randint(2, 10))
|
py | b400dc270caeada99017e95766ee43a98570ff55 | # divide and conquer
# get_ancestor: return None if contains neither nodes
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':
if p == None or q == None:
return None
return self.get_ancestor(root, p, q)
def get_ancestor(self, root, p, q):
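        # Divide and conquer: return root when it is None, p or q; if p and q land in
        # different subtrees, root is the LCA, otherwise pass up whichever side found one.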
if root == None or root == p or root == q:
return root
left = self.get_ancestor(root.left, p, q)
right = self.get_ancestor(root.right, p, q)
        if left != None and right != None: # p and q are separated
return root
if left != None: # p and q are both in left
return left
        if right != None: # p and q are both in right
return right
return None # empty |
py | b400dc6448d1d1c14f11c7e018ec633e8776b0cb | import logging
from stix_shifter_utils.modules.base.stix_translation.base_query_translator import (
BaseQueryTranslator
)
from . import query_constructor
logger = logging.getLogger(__name__)
class QueryTranslator(BaseQueryTranslator):
def transform_antlr(self, data, antlr_parsing_object):
logger.info("Converting STIX2 Pattern to data source query")
query_string = query_constructor.translate_pattern(
antlr_parsing_object, self, self.options)
return query_string
|
py | b400dd6fa52c61402371d1babb72f28afe29d61f | import os, glob
from argparse import ArgumentParser
from .config import ConfigReader, ReadingError
from .dispatcher import Dispatcher, DispatchError
from .messenger import Messenger
from .messenger import Level
from .util import module
import dotbot
import yaml
def add_options(parser):
parser.add_argument('-Q', '--super-quiet', action='store_true',
help='suppress almost all output')
parser.add_argument('-q', '--quiet', action='store_true',
help='suppress most output')
parser.add_argument('-v', '--verbose', action='store_true',
help='enable verbose output')
parser.add_argument('-d', '--base-directory',
help='execute commands from within BASEDIR',
metavar='BASEDIR')
parser.add_argument('-c', '--config-file',
help='run commands given in CONFIGFILE', metavar='CONFIGFILE')
parser.add_argument('-p', '--plugin', action='append', dest='plugins', default=[],
help='load PLUGIN as a plugin', metavar='PLUGIN')
parser.add_argument('--disable-built-in-plugins',
action='store_true', help='disable built-in plugins')
parser.add_argument('--plugin-dir', action='append', dest='plugin_dirs', default=[],
metavar='PLUGIN_DIR', help='load all plugins in PLUGIN_DIR')
parser.add_argument('--only', nargs='+',
help='only run specified directives', metavar='DIRECTIVE')
parser.add_argument('--except', nargs='+', dest='skip',
help='skip specified directives', metavar='DIRECTIVE')
parser.add_argument('--no-color', dest='no_color', action='store_true',
help='disable color output')
parser.add_argument('--version', action='store_true',
help='show program\'s version number and exit')
def read_config(config_file):
reader = ConfigReader(config_file)
return reader.get_config()
def main():
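    # Parse CLI options, configure logging, load built-in and user plugins,
    # read the YAML config, then dispatch its tasks from the base directory.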
log = Messenger()
try:
parser = ArgumentParser()
add_options(parser)
options = parser.parse_args()
if options.version:
print('Dotbot version %s (yaml: %s)' % (dotbot.__version__, yaml.__version__))
exit(0)
if options.super_quiet:
log.set_level(Level.WARNING)
if options.quiet:
log.set_level(Level.INFO)
if options.verbose:
log.set_level(Level.DEBUG)
if options.no_color:
log.use_color(False)
plugin_directories = list(options.plugin_dirs)
if not options.disable_built_in_plugins:
from .plugins import Clean, Create, Link, Shell
plugin_paths = []
for directory in plugin_directories:
for plugin_path in glob.glob(os.path.join(directory, '*.py')):
plugin_paths.append(plugin_path)
for plugin_path in options.plugins:
plugin_paths.append(plugin_path)
for plugin_path in plugin_paths:
abspath = os.path.abspath(plugin_path)
module.load(abspath)
if not options.config_file:
log.error('No configuration file specified')
exit(1)
tasks = read_config(options.config_file)
if tasks is None:
log.warning('Configuration file is empty, no work to do')
tasks = []
if not isinstance(tasks, list):
raise ReadingError('Configuration file must be a list of tasks')
if options.base_directory:
base_directory = os.path.abspath(options.base_directory)
else:
# default to directory of config file
base_directory = os.path.dirname(os.path.abspath(options.config_file))
os.chdir(base_directory)
dispatcher = Dispatcher(base_directory, only=options.only, skip=options.skip)
success = dispatcher.dispatch(tasks)
if success:
log.info('\n==> All tasks executed successfully')
else:
raise DispatchError('\n==> Some tasks were not executed successfully')
except (ReadingError, DispatchError) as e:
log.error('%s' % e)
exit(1)
except KeyboardInterrupt:
log.error('\n==> Operation aborted')
exit(1)
|
py | b400de2bc84d8246c1d387c293b770bb167a0098 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from typing import List
from epicteller.core.model.combat import CombatToken
from epicteller.core.model.kafka_msg import base
from epicteller.core.model.kafka_msg.base import KafkaMsg
class MsgCombat(KafkaMsg):
combat_id: int
@base.action
class MsgCombatCreate(MsgCombat):
action = 'epicteller.combat.create'
@base.action
class MsgCombatRun(MsgCombat):
action = 'epicteller.combat.run'
@base.action
class MsgCombatEnd(MsgCombat):
action = 'epicteller.combat.end'
@base.action
class MsgCombatActingTokenChange(MsgCombat):
action = 'epicteller.combat.acting_token_change'
last_token_name: str
current_token_name: str
rank: int
round_count: int
is_next_round: bool = False
@base.action
class MsgCombatReorderToken(MsgCombat):
action = 'epicteller.combat.reorder_token'
last_order_list: List[str]
current_order_list: List[str]
@base.action
class MsgAddCombatToken(MsgCombat):
action = 'epicteller.combat.add_combat_token'
token: CombatToken
rank: int
@base.action
class MsgRemoveCombatToken(MsgCombat):
action = 'epicteller.combat.remove_combat_token'
token: CombatToken
|
py | b400de84160480692941fb7d72517f1ba15aeadf | import sys
import os
import json
import itertools
import sim
import operator
from collections import defaultdict
BIG = [0]
SMALL = [1, 2, 3]
# PATH = "python /scratch/nas/1/dn/sniper-6.0/benchmarks/SimResults/myNumpy0102.py "
PATH = "python /scratch/nas/1/dn/sniper-6.0/scripts/predictor_blackbox.py "
MODELNAME = "/scratch/nas/1/dn/sniper-6.0/scripts/round_robin_model.p"
accumulated_stats = []
# STATSORDER = ['brhits', 'brmisses', 'dramreqs', 'dramreads', 'dramwrites', 'dtlbaccess', 'dtlbmisses', 'itlbaccess', 'itlbmisses', 'stlbaccess',
# 'stlbmisses', 'dl1loads', 'dl1misses', 'dl1stores', 'il1loads', 'il1misses', 'il1stores','l2loads', 'l2misses', 'l2stores',
# 'l3loads', 'l3misses', 'l3stores', 'uopbr', 'uopfpaddsub', 'uopfpmuldiv', 'uopgeneric', 'uopld', 'uopst', 'uoptotal']
params_to_use_per_thread = [
'uopBR_Norm', 'uopFPtotal_Norm', 'uopGeneric_Norm', 'uopLD_Norm',
'DL1miss_Norm', 'L2miss_Norm', 'L3miss_Norm', 'IL1ld_div_DL1ld_Norm',
'L2miss_div_DL1miss_Norm', 'L3miss_div_L2miss_Norm',
'L3miss_div_DL1miss_Norm'
] # 11 in total
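# STATSORDER entries are either raw counter names or nested [op, a, b] expressions
# ('/' or '+') that Thread.getStats() evaluates per core to build the feature vector.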
STATSORDER = [['/', 'uopbr', 'uoptotal'], [
'/', ['+', 'uopfpaddsub', 'uopfpmuldiv'], 'uoptotal'
], ['/', 'uopgeneric',
'uoptotal'], ['/', 'uopld', 'uoptotal'], ['/', 'uopst', 'uoptotal'], [
'/', 'dl1misses', ['+', 'dl1loads', 'dl1stores']
], ['/', 'l2misses', ['+', 'l2loads', 'l2stores']], [
'/', 'l3misses', ['+', 'l3loads', 'l3stores']
], ['/', 'il1loads', 'dl1loads'], ['/', 'l2misses', 'dl1misses'],
['/', 'l3misses', 'l2misses'], ['/', 'l3misses', 'dl1misses']]
def getScoreMetricTime(thread_id):
return long(sim.stats.get('thread', thread_id, 'nonidle_elapsed_time'))
def getScoreMetricInstructions(thread_id):
return long(sim.stats.get('thread', thread_id, 'instruction_count'))
class Thread:
global BIG
global SMALL
global PATH
global STATSORDER
def __init__(self, thread_id):
self.thread_id = thread_id
self.core = None
self.runnable = False
self.unscheduled = False
self.BigIpc = 0.1
self.SmallIpc = 0.1
self.getScoreMetric = lambda: getScoreMetricInstructions(thread_id)
self.score = 0 # Accumulated score
self.prevIPC = 0.1
self.prevCore = None
self.train_cycle = 1
self.ipc = 0
self.cycles = 0
self.mapping = []
self.thread_stats = []
self.hetero_score = 0 # Accumulated fairness score
self.metric_last = 0 # State at start of last interval
sim.thread.set_thread_affinity(self.thread_id, ())
def updateScore(self, stats):
self.cycles = stats['time'][self.core].delta * sim.dvfs.get_frequency(
self.core) / 1e9 # convert fs to cycles
instrs = stats['coreinstrs'][self.core].delta
self.ipc = instrs / (self.cycles or 1)
metric_now = self.getScoreMetric()
self.hetero_score += metric_now - self.metric_last
self.metric_last = metric_now
self.thread_stats = self.getStats(stats)
def getStats(self, stats):
result = []
value1 = 0
value2 = 0
for key in STATSORDER:
if type(key) == list:
if type(key[1]) == list:
k_value1 = (stats[key[1][1]])[self.core].delta
k_value2 = (stats[key[1][2]])[self.core].delta
if key[1][0] == '/':
if k_value2 != 0:
value1 = (k_value1 / k_value2)
else:
value1 = 0
elif key[1][0] == '+':
value1 = (k_value1 + k_value2)
else:
value1 = (stats[key[1]])[self.core].delta
if type(key[2]) == list:
k_value1 = (stats[key[2][1]])[self.core].delta
k_value2 = (stats[key[2][2]])[self.core].delta
if key[2][0] == '/':
if k_value2 != 0:
value2 = (k_value1 / k_value2)
else:
value1 = 0
elif key[2][0] == '+':
value2 = (k_value1 + k_value2)
else:
value2 = (stats[key[2]])[self.core].delta
if key[0] == '/':
if value2 != 0:
result.append(value1 / value2)
else:
result.append(0)
elif key[0] == '+':
result.append(value1 + value2)
else:
result.append((stats[key])[self.core].delta)
return result
def normalizeStats(self, stats):
normalized_stats = []
for index, value in enumerate(stats):
min_value = self.getMin(self.accumulated_non_normalized_stats,
index)
max_value = self.getMax(self.accumulated_non_normalized_stats,
index)
normalized_stats.append(
(value - min_value) / (max_value - min_value))
return normalized_stats
def getMax(self, accumulated_non_normalized_stats, index):
max_value = -5000
for stat_list in accumulated_non_normalized_stats:
if stat_list[index] > max_value:
max_value = stat_list[index]
return max_value
def getMin(self, accumulated_non_normalized_stats, index):
min_value = 5000
for stat_list in accumulated_non_normalized_stats:
if stat_list[index] < min_value:
min_value = stat_list[index]
return min_value
def updateHeteroScore(self):
metric_now = self.getScoreMetric()
self.hetero_score += metric_now - self.metric_last
self.metric_last = metric_now
def setScore(self, score):
self.score = score
def setHeteroScore(self, hetero_score):
self.hetero_score = hetero_score
self.metric_last = self.getScoreMetric()
def setCore(self, core_id, time=-1):
self.prevCore = self.core
self.core = core_id
if core_id is None:
self.last_scheduled_out = time
sim.thread.set_thread_affinity(self.thread_id, ())
else:
self.last_scheduled_in = time
sim.thread.set_thread_affinity(self.thread_id, [
c == core_id for c in range(sim.config.ncores)
])
def send_stats(self, stats):
statlist = []
if self.core in BIG:
statlist.append(self.BigIpc)
for key in STATSORDER:
statlist.append((stats[key])[self.core].delta)
jlist = json.dumps(statlist, separators=(',', ':'))
proc = os.popen(PATH + str(0) + " " + jlist).read()
#result = json.loads(proc)
#code above does not work check why
result = proc
return result
if self.core in SMALL:
statlist.append(self.SmallIpc)
for key in STATSORDER:
statlist.append((stats[key])[self.core].delta)
jlist = json.dumps(statlist, separators=(',', ':'))
proc = os.popen(PATH + str(1) + " " + jlist).read()
#result = json.loads(proc)
#code above does not work check why
result = proc
fresult = float(result)
return fresult
def __repr__(self):
r = str(self.thread_id) + " "
if self.core in BIG:
r += " " + "{:.4f}".format(self.ipc) + " "
r += " *" + "{:.4f}".format(self.ipc) + " "
elif self.core in SMALL:
r += " *" + "{:.4f}".format(self.ipc) + " "
r += " " + "{:.4f}".format(self.ipc) + " "
else:
r += " ?" + "{:.4f}".format(self.ipc) + " "
r += " ?" + "{:.4f}".format(self.ipc) + " "
r += "{:.4f}".format(self.score) + " "
r += "R " if self.runnable else "W "
if self.core is not None:
r += str(self.core)
else:
r += "N"
return r
class SchedulerLocality:
predicted_ipc = 0
predicted_mapping = []
prev_predicted_ipc = 0
prediction_gap = []
train_cycle = 0
train_data = []
system_ipcs = []
hetero_timer = 0
frame_1 = 0
frame_2 = 1
frame_3 = 2
frame_4 = 3
def setup(self, args):
print "setup"
self.icount_last = [0 for core in range(sim.config.ncores)]
self.last_reschedule = 0
self.sd = sim.util.StatsDelta()
self.stats = {
'time': [
self.getStatsGetter('performance_model', core, 'elapsed_time')
for core in range(sim.config.ncores)
],
'ffwd_time': [
self.getStatsGetter('fastforward_performance_model', core,
'fastforwarded_time')
for core in range(sim.config.ncores)
],
'instrs': [
self.getStatsGetter('performance_model', core,
'instruction_count')
for core in range(sim.config.ncores)
],
'coreinstrs': [
self.getStatsGetter('core', core, 'instructions')
for core in range(sim.config.ncores)
],
'brhits': [
self.getStatsGetter('branch_predictor', core, 'num-correct')
for core in range(sim.config.ncores)
],
'brmisses': [
self.getStatsGetter('branch_predictor', core, 'num-incorrect')
for core in range(sim.config.ncores)
],
'dramreqs': [
self.getStatsGetter('dram-queue', core, 'num-requests')
for core in range(sim.config.ncores)
],
'dramreads': [
self.getStatsGetter('dram', core, 'reads')
for core in range(sim.config.ncores)
],
'dramwrites': [
self.getStatsGetter('dram', core, 'writes')
for core in range(sim.config.ncores)
],
'dtlbaccess': [
self.getStatsGetter('dtlb', core, 'access')
for core in range(sim.config.ncores)
],
'dtlbmisses': [
self.getStatsGetter('dtlb', core, 'miss')
for core in range(sim.config.ncores)
],
'itlbaccess': [
self.getStatsGetter('itlb', core, 'access')
for core in range(sim.config.ncores)
],
'itlbmisses': [
self.getStatsGetter('itlb', core, 'miss')
for core in range(sim.config.ncores)
],
'stlbaccess': [
self.getStatsGetter('stlb', core, 'access')
for core in range(sim.config.ncores)
],
'stlbmisses': [
self.getStatsGetter('stlb', core, 'miss')
for core in range(sim.config.ncores)
],
'dl1loads': [
self.getStatsGetter('L1-D', core, 'loads')
for core in range(sim.config.ncores)
],
'dl1misses': [
self.getStatsGetter('L1-D', core, 'load-misses')
for core in range(sim.config.ncores)
],
'dl1stores': [
self.getStatsGetter('L1-D', core, 'stores')
for core in range(sim.config.ncores)
],
'il1loads': [
self.getStatsGetter('L1-I', core, 'loads')
for core in range(sim.config.ncores)
],
'il1misses': [
self.getStatsGetter('L1-I', core, 'load-misses')
for core in range(sim.config.ncores)
],
'il1stores': [
self.getStatsGetter('L1-I', core, 'stores')
for core in range(sim.config.ncores)
],
'l2loads': [
self.getStatsGetter('L2', core, 'loads')
for core in range(sim.config.ncores)
],
'l2misses': [
self.getStatsGetter('L2', core, 'load-misses')
for core in range(sim.config.ncores)
],
'l2stores': [
self.getStatsGetter('L2', core, 'stores')
for core in range(sim.config.ncores)
],
'l3loads': [
self.getStatsGetter('L3', core, 'loads')
for core in range(sim.config.ncores)
],
'l3misses': [
self.getStatsGetter('L3', core, 'load-misses')
for core in range(sim.config.ncores)
],
'l3stores': [
self.getStatsGetter('L3', core, 'stores')
for core in range(sim.config.ncores)
],
'uopbr': [
self.getStatsGetter('interval_timer', core, 'uop_branch')
for core in range(sim.config.ncores)
],
'uopfpaddsub': [
self.getStatsGetter('interval_timer', core, 'uop_fp_addsub')
for core in range(sim.config.ncores)
],
'uopfpmuldiv': [
self.getStatsGetter('interval_timer', core, 'uop_fp_muldiv')
for core in range(sim.config.ncores)
],
'uopgeneric': [
self.getStatsGetter('interval_timer', core, 'uop_generic')
for core in range(sim.config.ncores)
],
'uopld': [
self.getStatsGetter('interval_timer', core, 'uop_load')
for core in range(sim.config.ncores)
],
'uopst': [
self.getStatsGetter('interval_timer', core, 'uop_store')
for core in range(sim.config.ncores)
],
'uoptotal': [
self.getStatsGetter('interval_timer', core, 'uops_total')
for core in range(sim.config.ncores)
],
}
args = dict(enumerate((args or '').split(':')))
interval_ns = long(args.get(0, None) or 10000000)
scheduler_type = args.get(1, 'equal_instructions')
core_mask = args.get(2, '')
if scheduler_type == 'equal_time':
self.getScoreMetric = getScoreMetricTime
elif scheduler_type == 'equal_instructions':
self.getScoreMetric = getScoreMetricInstructions
else:
raise ValueError('Invalid scheduler type %s' % scheduler_type)
if core_mask:
core_mask = map(int,
core_mask.split(',')) + [0] * sim.config.ncores
self.cores = [
core for core in range(sim.config.ncores) if core_mask[core]
]
else:
self.cores = range(sim.config.ncores)
sim.util.Every(
1000000 * sim.util.Time.NS,
self.periodic,
statsdelta=self.sd,
roi_only=True)
self.threads = {}
self.last_core = 0
def hook_thread_start(self, thread_id, time):
self.threads[thread_id] = Thread(thread_id)
self.threads[thread_id].runnable = True
# Initial assignment: one thread per core until cores are exhausted
if self.last_core < len(self.cores):
self.threads[thread_id].setCore(self.cores[self.last_core],
sim.stats.time())
self.last_core += 1
else:
self.threads[thread_id].setCore(None, sim.stats.time())
def hook_thread_exit(self, thread_id, time):
self.hook_thread_stall(thread_id, 'exit', time)
def hook_thread_stall(self, thread_id, reason, time):
if reason == 'unscheduled':
# Ignore calls due to the thread being scheduled out
self.threads[thread_id].unscheduled = True
else:
core = self.threads[thread_id].core
self.threads[thread_id].setCore(None, time)
self.threads[thread_id].runnable = False
# Schedule a new thread (runnable, but not running) on this free core
threads = [
thread for thread in self.threads.values()
if thread.runnable and thread.core is None
]
if threads:
# Order by score
threads.sort(key=lambda thread: thread.score)
threads[0].setCore(core, time)
def hook_thread_resume(self, thread_id, woken_by, time):
if self.threads[thread_id].unscheduled:
# Ignore calls due to the thread being scheduled back in
self.threads[thread_id].unscheduled = False
else:
self.threads[thread_id].setHeteroScore(
max([thread.hetero_score for thread in self.threads.values()]))
self.threads[thread_id].runnable = True
#If there is a free core, move us there now
used_cores = set([
thread.core for thread in self.threads.values()
if thread.core is not None
])
free_cores = set(self.cores) - used_cores
if len(free_cores):
self.threads[thread_id].setCore(list(free_cores)[0], time)
def getSystemIPCForPreviousQuantum(self, threads):
system_ipc = 0
for thread in threads:
system_ipc += thread.ipc
return system_ipc
def updateTrainData(self, threads_to_train):
temp = []
for thread in threads_to_train:
temp.extend(thread.thread_stats)
ipc = self.getSystemIPCForPreviousQuantum(threads_to_train)
self.system_ipcs.append(ipc)
self.train_data.append(temp)
def getThreadFrame(self):
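        # Pick up to four runnable threads for the next quantum, preferring threads
        # that are not currently scheduled on a core.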
running_threads = [
thread for thread in self.threads.values()
if thread.core is not None
]
runable_threads = [
thread for thread in self.threads.values() if thread.runnable
]
if len(runable_threads) <= 4:
return runable_threads
next_quantum_threads = [
thread for thread in runable_threads
if thread not in running_threads
]
result = []
if len(next_quantum_threads) == 4:
return next_quantum_threads
if len(next_quantum_threads) < 4:
t = [
thread for thread in runable_threads
if thread not in next_quantum_threads
]
while len(next_quantum_threads) < 4 or t:
next_quantum_threads.append(t.pop())
return next_quantum_threads
if len(next_quantum_threads) > 4:
for thread in running_threads:
while True:
t_id = thread.thread_id
t_id = (t_id + 1) % max(
[thread.thread_id for thread in next_quantum_threads])
if t_id not in [
thread.thread_id for thread in running_threads
] and t_id in next_quantum_threads:
result.append(self.findThread(self.threads, t_id))
return result
def predict(self, a, b, c, d):
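        # Call the external predictor script with the four candidate threads' stats;
        # the returned string encodes a core ordering followed by the predicted system IPC.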
a = json.dumps(a, separators=(',', ':'))
b = json.dumps(b, separators=(',', ':'))
c = json.dumps(c, separators=(',', ':'))
d = json.dumps(d, separators=(',', ':'))
proc = os.popen(PATH + str(1) + " " + MODELNAME + " " + a + " " + b +
" " + c + " " + d).read()
# result = json.loads(proc)
# code above does not work check why
result = proc
print result
# print(result)
# do sys call
# syscall(train_data)
return result
def findThread(self, threads, thread_id):
for thread in self.threads.values():
if thread.thread_id == thread_id:
return thread
def periodic(self, time, time_delta):
order = ""
# Update mapper thread scores
[
thread.updateScore(self.stats) for thread in self.threads.values()
if thread.core is not None
]
threads_to_train = [
thread for thread in self.threads.values()
if thread.core is not None
]
threads_to_train.sort(key=lambda thread: thread.core)
combination_size = len(BIG) + len(SMALL)
if len(threads_to_train) >= 4:
self.updateTrainData(threads_to_train)
# train
self.train(self.train_data)
# Get a list of all runnable threads
threads = [
thread for thread in self.threads.values() if thread.runnable
]
# Order by score
if self.hetero_timer == 3:
threads = self.getThreadFrame()
# continue with 4 threads
self.hetero_timer = 0
self.hetero_timer += 1
if len(threads) >= 4:
if (sim.stats.time() / 1e12) > 5:
a = threads[0].thread_stats
b = threads[1].thread_stats
c = threads[2].thread_stats
d = threads[3].thread_stats
thread_order = [
threads[0].thread_id, threads[1].thread_id,
threads[2].thread_id, threads[3].thread_id
]
if a and b and c and d:
self.predicted_mapping = self.predict(a, b, c, d)
print self.predicted_mapping
if len(self.predicted_mapping) > 5:
order = self.predicted_mapping[:4]
self.prev_predicted_ipc = self.predicted_ipc
self.predicted_ipc = self.predicted_mapping[4:]
self.predicted_ipc = float(''.join(self.predicted_ipc))
# print self.predicted_ipc
# print order
temp = []
temp.append(
self.findThread(self.threads.values(),
thread_order[int(order[0])]))
temp.append(
self.findThread(self.threads.values(),
thread_order[int(order[1])]))
temp.append(
self.findThread(self.threads.values(),
thread_order[int(order[2])]))
temp.append(
self.findThread(self.threads.values(),
thread_order[int(order[3])]))
if len(temp) == 4:
threads = temp
free_cores = [0, 1, 2, 3]
# threads = [ thread for thread in threads if thread.core is None ]
threads = threads[:combination_size]
try:
for thread, core in zip(threads, sorted(free_cores)):
current_thread = [
t for t in self.threads.values() if t.core == core
]
for c_thread in current_thread:
if c_thread is not None:
c_thread.setCore(None)
thread.setCore(core, time)
self.printInfo()
except Exception:
            print str(len(threads)) + " threads"
            print str(len(self.threads.values())) + " threads.values"
def train(self, train_data):
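        # Send the accumulated per-quantum feature rows and observed system IPCs to the
        # external training script, then reset the training buffers.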
jlist = json.dumps(train_data, separators=(',', ':'))
statList = json.dumps(self.system_ipcs, separators=(',', ':'))
proc = os.popen(PATH + str(0) + " " + MODELNAME + " " + jlist + " " +
statList + " ").read()
result = proc
# do sys call
# syscall(train_data)
self.train_cycle = 0
self.train_data = []
self.system_ipcs = []
def getStatsGetter(self, component, core, metric):
# Some components don't exist (i.e. DRAM reads on cores that don't have a DRAM controller),
# return a special object that always returns 0 in these cases
try:
            return self.sd.getter(component, core, metric)
except:
class Zero():
def __init__(self):
self.delta = 0
def update(self):
pass
return Zero()
def testPrint(self):
print '----------- Quantum ', int(
sim.stats.time() / 1e12), '------------'
total_ipc = 0
for thread in self.threads.values():
if thread.core in BIG:
total_ipc += thread.BigIpc
elif thread.core in SMALL:
total_ipc += thread.SmallIpc
print thread
# print 'idle:',
# for core in range(sim.config.ncores):
# print '%2.0f%%' % (
# 100 * sim.stats.get('performance_model', core, 'idle_elapsed_time') / float(sim.stats.time())),
# print '%7d' % sim.stats.get('performance_model', core, 'idle_elapsed_time'),
# print '\nthreads:',
# for thread in range(sim.thread.get_nthreads()):
# print '%7dkins' % (sim.stats.get('thread', thread, 'instruction_count'))
print '-----------------------'
def get_quantum_squareError(self, pred, y):
# pred is the predicted system IPC value and y is the observed IPC value after quantum
e = (pred - y)**2
return e
def get_quantum_percentError(self, pred, y):
# pred is the predicted system IPC value and y is the observed IPC value after quantum
e = abs(pred - y) / y
return e
def printInfo(self):
print '----------- Quantum ', int(
sim.stats.time() / 1e12), '------------'
total_ipc = 0
for thread in self.threads.values():
total_ipc += thread.ipc
print "System IPC : " + str(total_ipc)
mapping = "[ "
core_mapping = defaultdict(list)
for thread in self.threads.values():
core_mapping[thread.core] = thread.thread_id
for i in range(0, (len(BIG) + len(SMALL))):
if core_mapping[i] or core_mapping[i] == 0:
mapping += str(core_mapping[i]) + " "
else:
mapping += "- "
mapping += "]"
if (int(sim.stats.time() / 1e12) > 1):
print "Misprediction : " + str(total_ipc - self.prev_predicted_ipc)
print "Predicted Ipc : " + str(self.predicted_ipc)
print "System Map " + mapping
print "Quantum Square Error : " + str(
self.get_quantum_squareError(self.prev_predicted_ipc, total_ipc))
print "Quantum Percent Error : " + str(
self.get_quantum_percentError(self.prev_predicted_ipc, total_ipc))
print "TId " + "B " + "S " + "Sc " + "Status " + "Core"
for thread in self.threads.values():
print thread
# print "*System IPC : " + str(self.predicted_ipc)
#
# mapping = "[ "
# core_mapping_predicted = defaultdict(list)
# for idx, thread in enumerate(self.predicted_mapping):
# core_mapping_predicted[idx] = thread.thread_id
# for i in range(0, (len(BIG) + len(SMALL))):
# if core_mapping_predicted[i] or core_mapping_predicted[i] == 0:
# mapping += str(core_mapping_predicted[i]) +" "
# else:
# mapping += "- "
# mapping +="]"
# print "*System Map " + mapping
# if(int(sim.stats.time() / 1e12) > 1):
# print "Avarage system misprediction : " + str(sum(self.prediction_gap) / len(self.prediction_gap))
# for thread in self.threads.values():
# if (thread.core in BIG and thread.prevCore in SMALL):
# print "thread id : ", str(thread.thread_id), " misprediction s2b : ", str(thread.BigIpc - thread.prevIPC)
# elif (thread.core in SMALL and thread.prevCore in BIG):
# print "thread id : ", str(thread.thread_id), " misprediction b2s : ", str(thread.SmallIpc - thread.prevIPC)
sim.util.register(SchedulerLocality())
|
py | b400df6a298df73d092069a79255954089946b65 | from neupy.algorithms.linear.base import BaseLinearNetwork
__all__ = ('LMS',)
class LMS(BaseLinearNetwork):
"""
LMS Neural Network. Algorithm has several names,
including the Widrow-Hoff or Delta rule.
Parameters
----------
{BaseLinearNetwork.connection}
{BaseNetwork.step}
{BaseNetwork.show_epoch}
{BaseNetwork.shuffle_data}
{BaseNetwork.epoch_end_signal}
{BaseNetwork.train_end_signal}
{Verbose.verbose}
Methods
-------
{BaseSkeleton.predict}
{ConstructibleNetwork.train}
{BaseSkeleton.fit}
Examples
--------
>>> import numpy as np
>>> from neupy import algorithms
>>>
>>> input_data = np.array([[1, 0], [2, 2], [3, 3], [0, 0]])
>>> target_data = np.array([[1], [0], [0], [1]])
>>>
>>> lmsnet = algorithms.LMS((2, 1), step=0.5)
>>>
>>> lmsnet.train(input_data, target_data, epochs=200)
>>> lmsnet.predict(np.array([[4, 4], [0, 0]]))
array([[0],
[1]])
See Also
--------
:network:`Perceptron` : Perceptron Neural Network.
"""
def init_train_updates(self):
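        # Widrow-Hoff (delta) rule: scale the L2-normalized input by the linear error
        # between the summated output and the target to get the weight and bias deltas.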
layer = self.connection.output_layers[0]
network_output = self.variables.network_output
network_input = self.variables.network_input
step = self.variables.step
summated_output = network_input.dot(layer.weight) + layer.bias
linear_error = summated_output - network_output
normalized_input = network_input / network_input.norm(L=2)
weight_delta = normalized_input.T.dot(linear_error)
bias_delta = linear_error.sum(axis=0)
return [
(layer.weight, layer.weight - step * weight_delta),
(layer.bias, layer.bias - step * bias_delta),
]
|
py | b400df83514894d0d10c59440a5c34f2e19109dd | from __future__ import absolute_import, print_function, division
import operator
from petl.compat import text_type
from petl.errors import DuplicateKeyError
from petl.util.base import Table, asindices, asdict, Record
def lookup(table, key, value=None, dictionary=None):
"""
Load a dictionary with data from the given table. E.g.::
>>> import petl as etl
>>> table1 = [['foo', 'bar'],
... ['a', 1],
... ['b', 2],
... ['b', 3]]
>>> lkp = etl.lookup(table1, 'foo', 'bar')
>>> lkp['a']
[1]
>>> lkp['b']
[2, 3]
>>> # if no value argument is given, defaults to the whole
... # row (as a tuple)
... lkp = etl.lookup(table1, 'foo')
>>> lkp['a']
[('a', 1)]
>>> lkp['b']
[('b', 2), ('b', 3)]
>>> # compound keys are supported
... table2 = [['foo', 'bar', 'baz'],
... ['a', 1, True],
... ['b', 2, False],
... ['b', 3, True],
... ['b', 3, False]]
>>> lkp = etl.lookup(table2, ('foo', 'bar'), 'baz')
>>> lkp[('a', 1)]
[True]
>>> lkp[('b', 2)]
[False]
>>> lkp[('b', 3)]
[True, False]
>>> # data can be loaded into an existing dictionary-like
... # object, including persistent dictionaries created via the
... # shelve module
... import shelve
>>> lkp = shelve.open('example.dat', flag='n')
>>> lkp = etl.lookup(table1, 'foo', 'bar', lkp)
>>> lkp.close()
>>> lkp = shelve.open('example.dat', flag='r')
>>> lkp['a']
[1]
>>> lkp['b']
[2, 3]
"""
if dictionary is None:
dictionary = dict()
it = iter(table)
hdr = next(it)
flds = list(map(text_type, hdr))
if value is None:
value = flds # default value is complete row
keyindices = asindices(hdr, key)
assert len(keyindices) > 0, 'no key selected'
valueindices = asindices(hdr, value)
assert len(valueindices) > 0, 'no value selected'
getkey = operator.itemgetter(*keyindices)
getvalue = operator.itemgetter(*valueindices)
for row in it:
k = getkey(row)
v = getvalue(row)
if k in dictionary:
# work properly with shelve
l = dictionary[k]
l.append(v)
dictionary[k] = l
else:
dictionary[k] = [v]
return dictionary
Table.lookup = lookup
def lookupone(table, key, value=None, dictionary=None, strict=False):
"""
Load a dictionary with data from the given table, assuming there is
at most one value for each key. E.g.::
>>> import petl as etl
>>> table1 = [['foo', 'bar'],
... ['a', 1],
... ['b', 2],
... ['b', 3]]
>>> # if the specified key is not unique and strict=False (default),
... # the first value wins
... lkp = etl.lookupone(table1, 'foo', 'bar')
>>> lkp['a']
1
>>> lkp['b']
2
>>> # if the specified key is not unique and strict=True, will raise
... # DuplicateKeyError
... try:
... lkp = etl.lookupone(table1, 'foo', strict=True)
... except etl.errors.DuplicateKeyError as e:
... print(e)
...
duplicate key: 'b'
>>> # compound keys are supported
... table2 = [['foo', 'bar', 'baz'],
... ['a', 1, True],
... ['b', 2, False],
... ['b', 3, True],
... ['b', 3, False]]
>>> lkp = etl.lookupone(table2, ('foo', 'bar'), 'baz')
>>> lkp[('a', 1)]
True
>>> lkp[('b', 2)]
False
>>> lkp[('b', 3)]
True
>>> # data can be loaded into an existing dictionary-like
... # object, including persistent dictionaries created via the
... # shelve module
... import shelve
>>> lkp = shelve.open('example.dat', flag='n')
>>> lkp = etl.lookupone(table1, 'foo', 'bar', lkp)
>>> lkp.close()
>>> lkp = shelve.open('example.dat', flag='r')
>>> lkp['a']
1
>>> lkp['b']
2
"""
if dictionary is None:
dictionary = dict()
it = iter(table)
hdr = next(it)
flds = list(map(text_type, hdr))
if value is None:
value = flds
keyindices = asindices(hdr, key)
assert len(keyindices) > 0, 'no key selected'
valueindices = asindices(hdr, value)
assert len(valueindices) > 0, 'no value selected'
getkey = operator.itemgetter(*keyindices)
getvalue = operator.itemgetter(*valueindices)
for row in it:
k = getkey(row)
if strict and k in dictionary:
raise DuplicateKeyError(k)
elif k not in dictionary:
v = getvalue(row)
dictionary[k] = v
return dictionary
Table.lookupone = lookupone
def dictlookup(table, key, dictionary=None):
"""
Load a dictionary with data from the given table, mapping to dicts. E.g.::
>>> import petl as etl
>>> table1 = [['foo', 'bar'],
... ['a', 1],
... ['b', 2],
... ['b', 3]]
>>> lkp = etl.dictlookup(table1, 'foo')
>>> lkp['a']
[{'foo': 'a', 'bar': 1}]
>>> lkp['b']
[{'foo': 'b', 'bar': 2}, {'foo': 'b', 'bar': 3}]
>>> # compound keys are supported
... table2 = [['foo', 'bar', 'baz'],
... ['a', 1, True],
... ['b', 2, False],
... ['b', 3, True],
... ['b', 3, False]]
>>> lkp = etl.dictlookup(table2, ('foo', 'bar'))
>>> lkp[('a', 1)]
[{'foo': 'a', 'baz': True, 'bar': 1}]
>>> lkp[('b', 2)]
[{'foo': 'b', 'baz': False, 'bar': 2}]
>>> lkp[('b', 3)]
[{'foo': 'b', 'baz': True, 'bar': 3}, {'foo': 'b', 'baz': False, 'bar': 3}]
>>> # data can be loaded into an existing dictionary-like
... # object, including persistent dictionaries created via the
... # shelve module
... import shelve
>>> lkp = shelve.open('example.dat', flag='n')
>>> lkp = etl.dictlookup(table1, 'foo', lkp)
>>> lkp.close()
>>> lkp = shelve.open('example.dat', flag='r')
>>> lkp['a']
[{'foo': 'a', 'bar': 1}]
>>> lkp['b']
[{'foo': 'b', 'bar': 2}, {'foo': 'b', 'bar': 3}]
"""
if dictionary is None:
dictionary = dict()
it = iter(table)
hdr = next(it)
flds = list(map(text_type, hdr))
keyindices = asindices(hdr, key)
assert len(keyindices) > 0, 'no key selected'
getkey = operator.itemgetter(*keyindices)
for row in it:
k = getkey(row)
rec = asdict(flds, row)
if k in dictionary:
# work properly with shelve
l = dictionary[k]
l.append(rec)
dictionary[k] = l
else:
dictionary[k] = [rec]
return dictionary
Table.dictlookup = dictlookup
def dictlookupone(table, key, dictionary=None, strict=False):
"""
Load a dictionary with data from the given table, mapping to dicts,
assuming there is at most one row for each key. E.g.::
>>> import petl as etl
>>> table1 = [['foo', 'bar'],
... ['a', 1],
... ['b', 2],
... ['b', 3]]
>>> # if the specified key is not unique and strict=False (default),
... # the first value wins
... lkp = etl.dictlookupone(table1, 'foo')
>>> lkp['a']
{'foo': 'a', 'bar': 1}
>>> lkp['b']
{'foo': 'b', 'bar': 2}
>>> # if the specified key is not unique and strict=True, will raise
... # DuplicateKeyError
... try:
... lkp = etl.dictlookupone(table1, 'foo', strict=True)
... except etl.errors.DuplicateKeyError as e:
... print(e)
...
duplicate key: 'b'
>>> # compound keys are supported
... table2 = [['foo', 'bar', 'baz'],
... ['a', 1, True],
... ['b', 2, False],
... ['b', 3, True],
... ['b', 3, False]]
>>> lkp = etl.dictlookupone(table2, ('foo', 'bar'))
>>> lkp[('a', 1)]
{'foo': 'a', 'baz': True, 'bar': 1}
>>> lkp[('b', 2)]
{'foo': 'b', 'baz': False, 'bar': 2}
>>> lkp[('b', 3)]
{'foo': 'b', 'baz': True, 'bar': 3}
>>> # data can be loaded into an existing dictionary-like
... # object, including persistent dictionaries created via the
... # shelve module
... import shelve
>>> lkp = shelve.open('example.dat', flag='n')
>>> lkp = etl.dictlookupone(table1, 'foo', lkp)
>>> lkp.close()
>>> lkp = shelve.open('example.dat', flag='r')
>>> lkp['a']
{'foo': 'a', 'bar': 1}
>>> lkp['b']
{'foo': 'b', 'bar': 2}
"""
if dictionary is None:
dictionary = dict()
it = iter(table)
hdr = next(it)
flds = list(map(text_type, hdr))
keyindices = asindices(hdr, key)
assert len(keyindices) > 0, 'no key selected'
getkey = operator.itemgetter(*keyindices)
for row in it:
k = getkey(row)
if strict and k in dictionary:
raise DuplicateKeyError(k)
elif k not in dictionary:
d = asdict(flds, row)
dictionary[k] = d
return dictionary
Table.dictlookupone = dictlookupone
def recordlookup(table, key, dictionary=None):
"""
Load a dictionary with data from the given table, mapping to record objects.
"""
if dictionary is None:
dictionary = dict()
it = iter(table)
hdr = next(it)
flds = list(map(text_type, hdr))
keyindices = asindices(hdr, key)
assert len(keyindices) > 0, 'no key selected'
getkey = operator.itemgetter(*keyindices)
for row in it:
k = getkey(row)
rec = Record(row, flds)
if k in dictionary:
# work properly with shelve
l = dictionary[k]
l.append(rec)
dictionary[k] = l
else:
dictionary[k] = [rec]
return dictionary
Table.recordlookup = recordlookup
def recordlookupone(table, key, dictionary=None, strict=False):
"""
Load a dictionary with data from the given table, mapping to record objects,
assuming there is at most one row for each key.
"""
if dictionary is None:
dictionary = dict()
it = iter(table)
hdr = next(it)
flds = list(map(text_type, hdr))
keyindices = asindices(hdr, key)
assert len(keyindices) > 0, 'no key selected'
getkey = operator.itemgetter(*keyindices)
for row in it:
k = getkey(row)
if strict and k in dictionary:
raise DuplicateKeyError(k)
elif k not in dictionary:
d = Record(row, flds)
dictionary[k] = d
return dictionary
Table.recordlookupone = recordlookupone
|
py | b400df9118bf243bc8959da20c6b0bd4e2e570ce | import os
import sys
import json
import time
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
class WindowObj2PytorchFinetuneTrain(QtWidgets.QWidget):
backward_hyper_param = QtCore.pyqtSignal();
forward_2_pytorch_finetune = QtCore.pyqtSignal();
def __init__(self):
super().__init__()
self.title = 'Pytorch Finetune - Train'
self.left = 100
self.top = 100
self.width = 900
self.height = 600
self.load_cfg();
self.initUI()
def load_cfg(self):
if(os.path.isfile("obj_2_pytorch_finetune.json")):
with open('obj_2_pytorch_finetune.json') as json_file:
self.system = json.load(json_file)
def initUI(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height);
# Backward
self.b1 = QPushButton('Back', self)
self.b1.move(700,550)
self.b1.clicked.connect(self.backward)
# Quit
self.b2 = QPushButton('Quit', self)
self.b2.move(800,550)
self.b2.clicked.connect(self.close)
self.tb1 = QTextEdit(self)
self.tb1.move(20, 20)
self.tb1.resize(400, 250)
self.tb1.setText(self.get_params());
self.tb1.setReadOnly(True)
self.l1 = QLabel(self);
self.l1.setText("Num Epochs: ");
self.l1.move(20, 300);
self.e1 = QLineEdit(self)
self.e1.move(120, 300);
self.e1.setText(self.system["epochs"]);
self.e1.resize(200, 25);
self.l2 = QLabel(self);
self.l2.setText("Saved Model Name: ");
self.l2.move(20, 350);
self.e2 = QLineEdit(self)
self.e2.move(170, 350);
self.e2.setText(self.system["output_model_name"]);
self.e2.resize(200, 25);
# Train
self.b3 = QPushButton('Train', self)
self.b3.move(20, 450)
self.b3.clicked.connect(self.train)
# Stop
self.b4 = QPushButton('Stop', self)
self.b4.move(150, 450)
self.b4.clicked.connect(self.stop)
# Infer
self.b4 = QPushButton('Infer', self)
self.b4.move(250, 450)
self.b4.clicked.connect(self.forward)
self.te1 = QTextBrowser(self);
self.te1.move(450, 20);
self.te1.setFixedSize(400, 500);
self.l3 = QLabel(self);
self.l3.setText("Status: ");
self.l3.move(420, 550);
self.tb2 = QTextEdit(self)
self.tb2.move(470, 550)
self.tb2.resize(200, 25)
self.tb2.setText("Not Started");
self.tb2.setReadOnly(True)
self.process = QtCore.QProcess(self)
self.process.readyReadStandardOutput.connect(self.stdoutReady)
self.process.readyReadStandardError.connect(self.stderrReady)
self.process.setProcessChannelMode(QtCore.QProcess.MergedChannels)
def get_params(self):
wr = "";
wr += "Anno Type - {}\n".format(self.system["anno_type"]);
wr += "Root Dir - {}\n".format(self.system["root_dir"]);
if(self.system["anno_type"] == "monk"):
wr += "Anno Dir - {}\n".format(self.system["anno_dir"]);
else:
wr += "Anno File - {}\n".format(self.system["anno_file"]);
wr += "Model - {}\n".format(self.system["model"]);
wr += "Pretrained - {}\n".format(self.system["use_pretrained"]);
wr += "GPU - {}\n".format(self.system["use_gpu"]);
if(self.system["use_gpu"]):
wr += "Devices - {}\n".format(self.system["devices"]);
return wr;
def train(self):
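        # Save the edited hyper-parameters back to obj_2_pytorch_finetune.json, copy the
        # training scripts into the working directory, and launch them via bash in the QProcess.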
self.te1.setText("");
self.tb2.setText("Running");
self.system["epochs"] = self.e1.text();
self.system["output_model_name"] = self.e2.text();
with open('obj_2_pytorch_finetune.json', 'w') as outfile:
json.dump(self.system, outfile);
os.system("cp cfg/detection/object_detection/obj_2_pytorch_finetune/train_obj_2_pytorch_finetune.py .");
os.system("cp cfg/detection/object_detection/obj_2_pytorch_finetune/train_obj_2_pytorch_finetune.sh .");
self.process.start('bash', ['train_obj_2_pytorch_finetune.sh'])
self.append("Process PID: " + str(self.process.pid()) + "\n");
def stop(self):
self.tb2.setText("Interrupted");
QMessageBox.about(self, "Training Status", "Interrupted");
self.process.kill();
self.append("Training Stopped\n")
def stdoutReady(self):
text = str(self.process.readAllStandardOutput().data(), encoding='utf-8')
if("Completed" in text):
QMessageBox.about(self, "Training Status", "Completed");
self.tb2.setText("Completed");
if("Error" in text or "error" in text or "ImportError" in text):
self.tb2.setText("Errors Found");
self.append(text)
def stderrReady(self):
text = str(self.process.readAllStandardError().data(), encoding='utf-8')
QMessageBox.about(self, "Training Status", "Errors Found");
self.tb2.setText("Errors Found");
self.append(text)
def append(self, text):
cursor = self.te1.textCursor()
self.te1.ensureCursorVisible()
cursor.movePosition(cursor.End)
cursor.insertText(text)
def forward(self):
self.forward_2_pytorch_finetune.emit();
def backward(self):
self.backward_hyper_param.emit();
'''
app = QApplication(sys.argv)
screen = WindowObj2PytorchFinetuneTrain()
screen.show()
sys.exit(app.exec_())
''' |
py | b400dfb51ce2f4f01bd9cbbcb191ce60d509ef03 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
rates_historic.py
An example using the Darwinex ZeroMQ Connector for Python 3 and MetaTrader 4 PULL REQUEST
for v2.0.1 in which a Client requests rate history from EURGBP Daily from 2019.01.04
to 2019.01.14.
-------------------
Rates history:
-------------------
Through command HIST, this client can select multiple rates from an INSTRUMENT (symbol, timeframe).
For example, to receive rates from instrument EURUSD(M1) between two dates, it will send this
command to the Server, through its PUSH channel:
"HIST;EURUSD;1;2019.01.04 00:00:00;2019.01.14 00:00:00"
--
@author: [raulMrello](https://www.linkedin.com/in/raul-martin-19254530/)
"""
#############################################################################
# DWX-ZMQ required imports
#############################################################################
# Append path for main project folder
import sys
sys.path.append('../../..')
# Import ZMQ-Strategy from relative path
from examples.template.strategies.base.DWX_ZMQ_Strategy import DWX_ZMQ_Strategy
#############################################################################
# Other required imports
#############################################################################
import os
from pandas import Timedelta, to_datetime, Timestamp
from threading import Thread, Lock
from time import sleep
import random
#############################################################################
# Class derived from DWZ_ZMQ_Strategy includes data processor for PULL,SUB data
#############################################################################
class rates_historic(DWX_ZMQ_Strategy):
def __init__(self,
_name="PRICES_SUBSCRIPTIONS",
_delay=0.1,
_broker_gmt=3,
_verbose=False):
# call DWX_ZMQ_Strategy constructor and passes itself as data processor for handling
# received data on PULL and SUB ports
super().__init__(_name,
[], # Empty symbol list (not needed for this example)
_broker_gmt,
[self], # Registers itself as handler of pull data via self.onPullData()
[self], # Registers itself as handler of sub data via self.onSubData()
_verbose)
# This strategy's variables
self._delay = _delay
self._verbose = _verbose
self._finished = False
# lock for acquire/release of ZeroMQ connector
self._lock = Lock()
##########################################################################
def isFinished(self):
""" Check if execution finished"""
return self._finished
##########################################################################
def onPullData(self, data):
"""
Callback to process new data received through the PULL port
"""
# print responses to request commands
print('\rHistoric from ExpertAdvisor={}'.format(data), end='', flush=True)
# finishes (removes all subscriptions)
self.stop()
##########################################################################
def onSubData(self, data):
"""
Callback to process new data received through the SUB port
"""
# split msg to get topic and message
_topic, _msg = data.split(" ")
print('\rData on Topic={} with Message={}'.format(_topic, _msg), end='', flush=True)
##########################################################################
def run(self):
"""
Request historic data
"""
self._finished = False
# request rates
print('\rRequesting Daily Rates from EURGBP', end='', flush=True)
self._zmq._DWX_MTX_SEND_MARKETHIST_REQUEST_(_symbol='EURGBP',
_timeframe=1440,
_start='2019.01.04 00:00:00',
_end ='2019.01.14 00:00:00')
##########################################################################
def stop(self):
"""
unsubscribe from all market symbols and exits
"""
# remove subscriptions and stop symbols price feeding
try:
# Acquire lock
self._lock.acquire()
self._zmq._DWX_MTX_UNSUBSCRIBE_ALL_MARKETDATA_REQUESTS_()
print('\rUnsubscribing from all topics', end='', flush=True)
finally:
# Release lock
self._lock.release()
sleep(self._delay)
self._finished = True
""" -----------------------------------------------------------------------------------------------
-----------------------------------------------------------------------------------------------
SCRIPT SETUP
-----------------------------------------------------------------------------------------------
-----------------------------------------------------------------------------------------------
"""
if __name__ == "__main__":
    # creates object with a predefined configuration: historic EURGBP_D1 between 4th and 14th January 2019
print('\rLoading example...', end='', flush=True)
example = rates_historic()
# Starts example execution
    print('\rRunning example...', end='', flush=True)
example.run()
# Waits example termination
print('\rWaiting example termination...', end='', flush=True)
while not example.isFinished():
sleep(1)
print('\rBye!!!', end='', flush=True)
|
py | b400e2c2bcee814af0bc1baa95cc25dcb7f86050 | # Generated by Django 2.2.6 on 2019-10-20 11:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('provider', '0006_auto_20191020_1501'),
]
operations = [
migrations.AlterModelOptions(
name='providerimages',
options={'verbose_name': 'provider Images', 'verbose_name_plural': 'providers Images'},
),
migrations.AlterField(
model_name='providerimages',
name='provider',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='provider.Provider'),
),
]
|
py | b400e30b1a9428f4de8f3a90e32f651b36f133fd | Xiao Yang is damn handsome
Yeah, pretty handsome
Not bad
But I think I'm more handsome than him; I'm the boss
wode
sje
sidwj
sdjsd
sdjsldsdls
saklsk l
ash w as
class yus:s
sdksal
def from int is not and
I'm Tarzan from next door, grabbing the vine of love; listen to what I say
Oh oh oh
|
py | b400e3b170bb25363a32c8ee5acfc1349dd060e8 |
import tensorflow as tf
import numpy as np
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.models import Sequential
from tensorflow.keras import regularizers
from .embedding import embedding
class RankingNeuralCF(tf.keras.Model):
def __init__(self,
unique_user_ids,
unique_movie_ids,
embedding_dimension,
hidden_layers,
dropout_amount,
regularization_weight):
super().__init__()
self.query_embeddings: tf.keras.Model = embedding(unique_user_ids, embedding_dimension)
self.candidate_embeddings: tf.keras.Model = embedding(unique_movie_ids, embedding_dimension)
self.model = Sequential()
for layer_dim in hidden_layers:
self.model.add(Dense(layer_dim,
activation="relu",
kernel_regularizer=regularizers.l2(regularization_weight),
bias_regularizer=regularizers.l2(regularization_weight)))
self.model.add(Dropout(dropout_amount))
self.model.add(Dense(1))
def call(self, userid, movieid, **unused):
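        # Look up the user and item embeddings, concatenate them, and score the pair
        # with the feed-forward ranking network.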
query_embedding = self.query_embeddings(userid)
candidate_embedding = self.candidate_embeddings(movieid)
return self.model(tf.concat(values=[query_embedding, candidate_embedding],axis=1))
def print_summary(self):
print(self.query_embeddings.summary())
print(self.candidate_embeddings.summary())
print(self.model.summary())
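# A minimal, hypothetical usage sketch of the factory defined below. The ID
# arrays, batch tensors and the behaviour of `embedding` are assumptions made
# for illustration only and are not part of this module:
#
#   model = ranking_neural_cf(unique_user_ids=np.array([b"1", b"2"]),
#                             unique_item_ids=np.array([b"10", b"20"]),
#                             embedding_dimension=16)
#   scores = model(userid=tf.constant([b"1"]), movieid=tf.constant([b"10"]))
#   # `scores` has shape (batch, 1): one predicted rating per (user, item) pair.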
def ranking_neural_cf(
unique_user_ids: np.array,
unique_item_ids: np.array,
embedding_dimension: int = 32,
hidden_layers: list = [64, 32],
dropout_amount: float = 0.2,
regularization_weight: float = 0.,
**unused) -> tf.keras.Model:
return RankingNeuralCF(unique_user_ids, unique_item_ids, embedding_dimension,
hidden_layers, dropout_amount, regularization_weight) |
py | b400e53f450f7794aea3df96ccf71e157a833b7a | # Generated by pypy/tool/import_cffi.py
import sys, os, py
import pytest
from cffi import FFI, VerificationError, FFIError, CDefError
from cffi import recompiler
from extra_tests.cffi_tests.udir import udir
from extra_tests.cffi_tests.support import u, long
from extra_tests.cffi_tests.support import FdWriteCapture, StdErrCapture, _verify
try:
import importlib
except ImportError:
importlib = None
def check_type_table(input, expected_output, included=None):
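    """Build an FFI from the cdef text `input` (optionally ffi.include()-ing
    a second FFI built from `included`), run the recompiler's type-table
    collection, and assert that the serialized table equals `expected_output`.
    """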
ffi = FFI()
if included:
ffi1 = FFI()
ffi1.cdef(included)
ffi.include(ffi1)
ffi.cdef(input)
recomp = recompiler.Recompiler(ffi, 'testmod')
recomp.collect_type_table()
assert ''.join(map(str, recomp.cffi_types)) == expected_output
def verify(ffi, module_name, source, *args, **kwds):
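    """Compile `ffi` against the C `source` into a test extension module
    named '_CFFI_<module_name>'.  Unless disabled by the NO_CPP environment
    variable or the `no_cpp` keyword, the source is wrapped in extern "C"
    and built as C++ so that mode is exercised too; on non-Windows C builds,
    stricter warning flags are added unless `ignore_warnings` is set."""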
no_cpp = kwds.pop('no_cpp', False)
ignore_warnings = kwds.pop('ignore_warnings', False)
kwds.setdefault('undef_macros', ['NDEBUG'])
module_name = '_CFFI_' + module_name
ffi.set_source(module_name, source)
if not os.environ.get('NO_CPP') and not no_cpp: # test the .cpp mode too
kwds.setdefault('source_extension', '.cpp')
source = 'extern "C" {\n%s\n}' % (source,)
elif sys.platform != 'win32' and not ignore_warnings:
# add '-Werror' to the existing 'extra_compile_args' flags
from extra_tests.cffi_tests.support import extra_compile_args
kwds['extra_compile_args'] = (kwds.get('extra_compile_args', []) +
extra_compile_args)
if sys.platform == 'darwin':
kwds['extra_link_args'] = (kwds.get('extra_link_args', []) +
['-stdlib=libc++'])
return _verify(ffi, module_name, source, *args, **kwds)
def test_set_source_no_slashes():
ffi = FFI()
py.test.raises(ValueError, ffi.set_source, "abc/def", None)
py.test.raises(ValueError, ffi.set_source, "abc/def", "C code")
def test_type_table_func():
check_type_table("double sin(double);",
"(FUNCTION 1)(PRIMITIVE 14)(FUNCTION_END 0)")
check_type_table("float sin(double);",
"(FUNCTION 3)(PRIMITIVE 14)(FUNCTION_END 0)(PRIMITIVE 13)")
check_type_table("float sin(void);",
"(FUNCTION 2)(FUNCTION_END 0)(PRIMITIVE 13)")
check_type_table("double sin(float); double cos(float);",
"(FUNCTION 3)(PRIMITIVE 13)(FUNCTION_END 0)(PRIMITIVE 14)")
check_type_table("double sin(float); double cos(double);",
"(FUNCTION 1)(PRIMITIVE 14)(FUNCTION_END 0)" # cos
"(FUNCTION 1)(PRIMITIVE 13)(FUNCTION_END 0)") # sin
check_type_table("float sin(double); float cos(float);",
"(FUNCTION 4)(PRIMITIVE 14)(FUNCTION_END 0)" # sin
"(FUNCTION 4)(PRIMITIVE 13)(FUNCTION_END 0)") # cos
def test_type_table_use_noop_for_repeated_args():
check_type_table("double sin(double *, double *);",
"(FUNCTION 4)(POINTER 4)(NOOP 1)(FUNCTION_END 0)"
"(PRIMITIVE 14)")
check_type_table("double sin(double *, double *, double);",
"(FUNCTION 3)(POINTER 3)(NOOP 1)(PRIMITIVE 14)"
"(FUNCTION_END 0)")
def test_type_table_dont_use_noop_for_primitives():
check_type_table("double sin(double, double);",
"(FUNCTION 1)(PRIMITIVE 14)(PRIMITIVE 14)(FUNCTION_END 0)")
def test_type_table_funcptr_as_argument():
check_type_table("int sin(double(float));",
"(FUNCTION 6)(PRIMITIVE 13)(FUNCTION_END 0)"
"(FUNCTION 7)(POINTER 0)(FUNCTION_END 0)"
"(PRIMITIVE 14)(PRIMITIVE 7)")
def test_type_table_variadic_function():
check_type_table("int sin(int, ...);",
"(FUNCTION 1)(PRIMITIVE 7)(FUNCTION_END 1)(POINTER 0)")
def test_type_table_array():
check_type_table("extern int a[100];",
"(PRIMITIVE 7)(ARRAY 0)(None 100)")
def test_type_table_typedef():
check_type_table("typedef int foo_t;",
"(PRIMITIVE 7)")
def test_type_table_prebuilt_type():
check_type_table("int32_t f(void);",
"(FUNCTION 2)(FUNCTION_END 0)(PRIMITIVE 21)")
def test_type_table_struct_opaque():
check_type_table("struct foo_s;",
"(STRUCT_UNION 0)")
def test_type_table_struct():
check_type_table("struct foo_s { int a; long b; };",
"(PRIMITIVE 7)(PRIMITIVE 9)(STRUCT_UNION 0)")
def test_type_table_union():
check_type_table("union foo_u { int a; long b; };",
"(PRIMITIVE 7)(PRIMITIVE 9)(STRUCT_UNION 0)")
def test_type_table_struct_used():
check_type_table("struct foo_s { int a; long b; }; int f(struct foo_s*);",
"(FUNCTION 3)(POINTER 5)(FUNCTION_END 0)"
"(PRIMITIVE 7)(PRIMITIVE 9)"
"(STRUCT_UNION 0)")
def test_type_table_anonymous_struct_with_typedef():
check_type_table("typedef struct { int a; long b; } foo_t;",
"(STRUCT_UNION 0)(PRIMITIVE 7)(PRIMITIVE 9)")
def test_type_table_enum():
check_type_table("enum foo_e { AA, BB, ... };",
"(ENUM 0)")
def test_type_table_include_1():
check_type_table("foo_t sin(foo_t);",
"(FUNCTION 1)(PRIMITIVE 14)(FUNCTION_END 0)",
included="typedef double foo_t;")
def test_type_table_include_2():
check_type_table("struct foo_s *sin(struct foo_s *);",
"(FUNCTION 1)(POINTER 3)(FUNCTION_END 0)(STRUCT_UNION 0)",
included="struct foo_s { int x, y; };")
def test_math_sin():
import math
ffi = FFI()
ffi.cdef("float sin(double); double cos(double);")
lib = verify(ffi, 'test_math_sin', '#include <math.h>',
ignore_warnings=True)
assert lib.cos(1.43) == math.cos(1.43)
def test_repr_lib():
ffi = FFI()
lib = verify(ffi, 'test_repr_lib', '')
assert repr(lib) == "<Lib object for '_CFFI_test_repr_lib'>"
def test_funcarg_ptr():
ffi = FFI()
ffi.cdef("int foo(int *);")
lib = verify(ffi, 'test_funcarg_ptr', 'int foo(int *p) { return *p; }')
assert lib.foo([-12345]) == -12345
def test_funcres_ptr():
ffi = FFI()
ffi.cdef("int *foo(void);")
lib = verify(ffi, 'test_funcres_ptr',
'int *foo(void) { static int x=-12345; return &x; }')
assert lib.foo()[0] == -12345
def test_global_var_array():
ffi = FFI()
ffi.cdef("extern int a[100];")
lib = verify(ffi, 'test_global_var_array', 'int a[100] = { 9999 };')
lib.a[42] = 123456
assert lib.a[42] == 123456
assert lib.a[0] == 9999
def test_verify_typedef():
ffi = FFI()
ffi.cdef("typedef int **foo_t;")
lib = verify(ffi, 'test_verify_typedef', 'typedef int **foo_t;')
assert ffi.sizeof("foo_t") == ffi.sizeof("void *")
def test_verify_typedef_dotdotdot():
ffi = FFI()
ffi.cdef("typedef ... foo_t;")
verify(ffi, 'test_verify_typedef_dotdotdot', 'typedef int **foo_t;')
def test_verify_typedef_star_dotdotdot():
ffi = FFI()
ffi.cdef("typedef ... *foo_t;")
verify(ffi, 'test_verify_typedef_star_dotdotdot', 'typedef int **foo_t;')
def test_global_var_int():
ffi = FFI()
ffi.cdef("extern int a, b, c;")
lib = verify(ffi, 'test_global_var_int', 'int a = 999, b, c;')
assert lib.a == 999
lib.a -= 1001
assert lib.a == -2
lib.a = -2147483648
assert lib.a == -2147483648
with pytest.raises(OverflowError):
lib.a = 2147483648
with pytest.raises(OverflowError):
lib.a = -2147483649
lib.b = 525 # try with the first access being in setattr, too
assert lib.b == 525
with pytest.raises(AttributeError):
del lib.a
with pytest.raises(AttributeError):
del lib.c
with pytest.raises(AttributeError):
del lib.foobarbaz
def test_macro():
ffi = FFI()
ffi.cdef("#define FOOBAR ...")
lib = verify(ffi, 'test_macro', "#define FOOBAR (-6912)")
assert lib.FOOBAR == -6912
with pytest.raises(AttributeError):
lib.FOOBAR = 2
def test_macro_check_value():
# the value '-0x80000000' in C sources does not have a clear meaning
# to me; it appears to have a different effect than '-2147483648'...
# Moreover, on 32-bits, -2147483648 is actually equal to
# -2147483648U, which in turn is equal to 2147483648U and so positive.
vals = ['42', '-42', '0x80000000', '-2147483648',
'0', '9223372036854775809ULL',
'-9223372036854775807LL']
if sys.maxsize <= 2**32 or sys.platform == 'win32':
vals.remove('-2147483648')
ffi = FFI()
cdef_lines = ['#define FOO_%d_%d %s' % (i, j, vals[i])
for i in range(len(vals))
for j in range(len(vals))]
ffi.cdef('\n'.join(cdef_lines))
verify_lines = ['#define FOO_%d_%d %s' % (i, j, vals[j]) # [j], not [i]
for i in range(len(vals))
for j in range(len(vals))]
lib = verify(ffi, 'test_macro_check_value_ok',
'\n'.join(verify_lines))
#
for j in range(len(vals)):
c_got = int(vals[j].replace('U', '').replace('L', ''), 0)
c_compiler_msg = str(c_got)
if c_got > 0:
c_compiler_msg += ' (0x%x)' % (c_got,)
#
for i in range(len(vals)):
attrname = 'FOO_%d_%d' % (i, j)
if i == j:
x = getattr(lib, attrname)
assert x == c_got
else:
e = py.test.raises(ffi.error, getattr, lib, attrname)
assert str(e.value) == (
"the C compiler says '%s' is equal to "
"%s, but the cdef disagrees" % (attrname, c_compiler_msg))
def test_constant():
ffi = FFI()
ffi.cdef("static const int FOOBAR;")
lib = verify(ffi, 'test_constant', "#define FOOBAR (-6912)")
assert lib.FOOBAR == -6912
with pytest.raises(AttributeError):
lib.FOOBAR = 2
def test_check_value_of_static_const():
ffi = FFI()
ffi.cdef("static const int FOOBAR = 042;")
lib = verify(ffi, 'test_check_value_of_static_const',
"#define FOOBAR (-6912)")
e = py.test.raises(ffi.error, getattr, lib, 'FOOBAR')
assert str(e.value) == (
"the C compiler says 'FOOBAR' is equal to -6912, but the cdef disagrees")
def test_constant_nonint():
ffi = FFI()
ffi.cdef("static const double FOOBAR;")
lib = verify(ffi, 'test_constant_nonint', "#define FOOBAR (-6912.5)")
assert lib.FOOBAR == -6912.5
with pytest.raises(AttributeError):
lib.FOOBAR = 2
def test_constant_ptr():
ffi = FFI()
ffi.cdef("static double *const FOOBAR;")
lib = verify(ffi, 'test_constant_ptr', "#define FOOBAR NULL")
assert lib.FOOBAR == ffi.NULL
assert ffi.typeof(lib.FOOBAR) == ffi.typeof("double *")
def test_dir():
ffi = FFI()
ffi.cdef("int ff(int); extern int aa; static const int my_constant;")
lib = verify(ffi, 'test_dir', """
#define my_constant (-45)
int aa;
int ff(int x) { return x+aa; }
""")
lib.aa = 5
assert dir(lib) == ['aa', 'ff', 'my_constant']
#
aaobj = lib.__dict__['aa']
assert not isinstance(aaobj, int) # some internal object instead
assert lib.__dict__ == {
'ff': lib.ff,
'aa': aaobj,
'my_constant': -45}
lib.__dict__['ff'] = "??"
assert lib.ff(10) == 15
def test_verify_opaque_struct():
ffi = FFI()
ffi.cdef("struct foo_s;")
lib = verify(ffi, 'test_verify_opaque_struct', "struct foo_s;")
assert ffi.typeof("struct foo_s").cname == "struct foo_s"
def test_verify_opaque_union():
ffi = FFI()
ffi.cdef("union foo_s;")
lib = verify(ffi, 'test_verify_opaque_union', "union foo_s;")
assert ffi.typeof("union foo_s").cname == "union foo_s"
def test_verify_struct():
ffi = FFI()
ffi.cdef("""struct foo_s { int b; short a; ...; };
struct bar_s { struct foo_s *f; };""")
lib = verify(ffi, 'test_verify_struct',
"""struct foo_s { short a; int b; };
struct bar_s { struct foo_s *f; };""")
ffi.typeof("struct bar_s *")
p = ffi.new("struct foo_s *", {'a': -32768, 'b': -2147483648})
assert p.a == -32768
assert p.b == -2147483648
with pytest.raises(OverflowError):
p.a -= 1
with pytest.raises(OverflowError):
p.b -= 1
q = ffi.new("struct bar_s *", {'f': p})
assert q.f == p
#
assert ffi.offsetof("struct foo_s", "a") == 0
assert ffi.offsetof("struct foo_s", "b") == 4
assert ffi.offsetof(u+"struct foo_s", u+"b") == 4
#
py.test.raises(TypeError, ffi.addressof, p)
assert ffi.addressof(p[0]) == p
assert ffi.typeof(ffi.addressof(p[0])) is ffi.typeof("struct foo_s *")
assert ffi.typeof(ffi.addressof(p, "b")) is ffi.typeof("int *")
assert ffi.addressof(p, "b")[0] == p.b
def test_verify_exact_field_offset():
ffi = FFI()
ffi.cdef("""struct foo_s { int b; short a; };""")
lib = verify(ffi, 'test_verify_exact_field_offset',
"""struct foo_s { short a; int b; };""")
e = py.test.raises(ffi.error, ffi.new, "struct foo_s *", []) # lazily
assert str(e.value).startswith(
"struct foo_s: wrong offset for field 'b' (cdef "
'says 0, but C compiler says 4). fix it or use "...;" ')
def test_type_caching():
ffi1 = FFI(); ffi1.cdef("struct foo_s;")
ffi2 = FFI(); ffi2.cdef("struct foo_s;") # different one!
lib1 = verify(ffi1, 'test_type_caching_1', 'struct foo_s;')
lib2 = verify(ffi2, 'test_type_caching_2', 'struct foo_s;')
# shared types
assert ffi1.typeof("long") is ffi2.typeof("long")
assert ffi1.typeof("long**") is ffi2.typeof("long * *")
assert ffi1.typeof("long(*)(int, ...)") is ffi2.typeof("long(*)(int, ...)")
# non-shared types
assert ffi1.typeof("struct foo_s") is not ffi2.typeof("struct foo_s")
assert ffi1.typeof("struct foo_s *") is not ffi2.typeof("struct foo_s *")
assert ffi1.typeof("struct foo_s*(*)()") is not (
ffi2.typeof("struct foo_s*(*)()"))
assert ffi1.typeof("void(*)(struct foo_s*)") is not (
ffi2.typeof("void(*)(struct foo_s*)"))
def test_verify_enum():
ffi = FFI()
ffi.cdef("""enum e1 { B1, A1, ... }; enum e2 { B2, A2, ... };""")
lib = verify(ffi, 'test_verify_enum',
"enum e1 { A1, B1, C1=%d };" % sys.maxsize +
"enum e2 { A2, B2, C2 };")
ffi.typeof("enum e1")
ffi.typeof("enum e2")
assert lib.A1 == 0
assert lib.B1 == 1
assert lib.A2 == 0
assert lib.B2 == 1
assert ffi.sizeof("enum e1") == ffi.sizeof("long")
assert ffi.sizeof("enum e2") == ffi.sizeof("int")
assert repr(ffi.cast("enum e1", 0)) == "<cdata 'enum e1' 0: A1>"
def test_duplicate_enum():
ffi = FFI()
ffi.cdef("enum e1 { A1, ... }; enum e2 { A1, ... };")
py.test.raises(VerificationError, verify, ffi, 'test_duplicate_enum',
"enum e1 { A1 }; enum e2 { B1 };")
def test_dotdotdot_length_of_array_field():
ffi = FFI()
ffi.cdef("struct foo_s { int a[...]; int b[...]; };")
verify(ffi, 'test_dotdotdot_length_of_array_field',
"struct foo_s { int a[42]; int b[11]; };")
assert ffi.sizeof("struct foo_s") == (42 + 11) * 4
p = ffi.new("struct foo_s *")
assert p.a[41] == p.b[10] == 0
with pytest.raises(IndexError):
p.a[42]
with pytest.raises(IndexError):
p.b[11]
def test_dotdotdot_global_array():
ffi = FFI()
ffi.cdef("extern int aa[...]; extern int bb[...];")
lib = verify(ffi, 'test_dotdotdot_global_array',
"int aa[41]; int bb[12];")
assert ffi.sizeof(lib.aa) == 41 * 4
assert ffi.sizeof(lib.bb) == 12 * 4
assert lib.aa[40] == lib.bb[11] == 0
with pytest.raises(IndexError):
lib.aa[41]
with pytest.raises(IndexError):
lib.bb[12]
def test_misdeclared_field_1():
ffi = FFI()
ffi.cdef("struct foo_s { int a[5]; };")
try:
verify(ffi, 'test_misdeclared_field_1',
"struct foo_s { int a[6]; };")
except VerificationError:
pass # ok, fail during compilation already (e.g. C++)
else:
assert ffi.sizeof("struct foo_s") == 24 # found by the actual C code
try:
# lazily build the fields and boom:
p = ffi.new("struct foo_s *")
p.a
assert False, "should have raised"
except ffi.error as e:
assert str(e).startswith("struct foo_s: wrong size for field 'a' "
"(cdef says 20, but C compiler says 24)")
def test_open_array_in_struct():
ffi = FFI()
ffi.cdef("struct foo_s { int b; int a[]; };")
verify(ffi, 'test_open_array_in_struct',
"struct foo_s { int b; int a[]; };")
assert ffi.sizeof("struct foo_s") == 4
p = ffi.new("struct foo_s *", [5, [10, 20, 30, 40]])
assert p.a[2] == 30
assert ffi.sizeof(p) == ffi.sizeof("void *")
assert ffi.sizeof(p[0]) == 5 * ffi.sizeof("int")
def test_math_sin_type():
ffi = FFI()
ffi.cdef("double sin(double); void *xxtestfunc();")
lib = verify(ffi, 'test_math_sin_type', """
#include <math.h>
void *xxtestfunc(void) { return 0; }
""")
# 'lib.sin' is typed as a <built-in method> object on lib
assert ffi.typeof(lib.sin).cname == "double(*)(double)"
# 'x' is another <built-in method> object on lib, made very indirectly
x = type(lib).__dir__.__get__(lib)
py.test.raises(TypeError, ffi.typeof, x)
#
# present on built-in functions on CPython; must be emulated on PyPy:
assert lib.sin.__name__ == 'sin'
assert lib.sin.__module__ == '_CFFI_test_math_sin_type'
assert lib.sin.__doc__ == (
"double sin(double);\n"
"\n"
"CFFI C function from _CFFI_test_math_sin_type.lib")
assert ffi.typeof(lib.xxtestfunc).cname == "void *(*)()"
assert lib.xxtestfunc.__doc__ == (
"void *xxtestfunc();\n"
"\n"
"CFFI C function from _CFFI_test_math_sin_type.lib")
def test_verify_anonymous_struct_with_typedef():
ffi = FFI()
ffi.cdef("typedef struct { int a; long b; ...; } foo_t;")
verify(ffi, 'test_verify_anonymous_struct_with_typedef',
"typedef struct { long b; int hidden, a; } foo_t;")
p = ffi.new("foo_t *", {'b': 42})
assert p.b == 42
assert repr(p).startswith("<cdata 'foo_t *' ")
def test_verify_anonymous_struct_with_star_typedef():
ffi = FFI()
ffi.cdef("typedef struct { int a; long b; } *foo_t;")
verify(ffi, 'test_verify_anonymous_struct_with_star_typedef',
"typedef struct { int a; long b; } *foo_t;")
p = ffi.new("foo_t", {'b': 42})
assert p.b == 42
def test_verify_anonymous_enum_with_typedef():
ffi = FFI()
ffi.cdef("typedef enum { AA, ... } e1;")
lib = verify(ffi, 'test_verify_anonymous_enum_with_typedef1',
"typedef enum { BB, CC, AA } e1;")
assert lib.AA == 2
assert ffi.sizeof("e1") == ffi.sizeof("int")
assert repr(ffi.cast("e1", 2)) == "<cdata 'e1' 2: AA>"
#
ffi = FFI()
ffi.cdef("typedef enum { AA=%d } e1;" % sys.maxsize)
lib = verify(ffi, 'test_verify_anonymous_enum_with_typedef2',
"typedef enum { AA=%d } e1;" % sys.maxsize)
assert lib.AA == int(ffi.cast("long", sys.maxsize))
assert ffi.sizeof("e1") == ffi.sizeof("long")
def test_unique_types():
CDEF = "struct foo_s; union foo_u; enum foo_e { AA };"
ffi1 = FFI(); ffi1.cdef(CDEF); verify(ffi1, "test_unique_types_1", CDEF)
ffi2 = FFI(); ffi2.cdef(CDEF); verify(ffi2, "test_unique_types_2", CDEF)
#
assert ffi1.typeof("char") is ffi2.typeof("char ")
assert ffi1.typeof("long") is ffi2.typeof("signed long int")
assert ffi1.typeof("double *") is ffi2.typeof("double*")
assert ffi1.typeof("int ***") is ffi2.typeof(" int * * *")
assert ffi1.typeof("int[]") is ffi2.typeof("signed int[]")
assert ffi1.typeof("signed int*[17]") is ffi2.typeof("int *[17]")
assert ffi1.typeof("void") is ffi2.typeof("void")
assert ffi1.typeof("int(*)(int,int)") is ffi2.typeof("int(*)(int,int)")
#
# these depend on user-defined data, so should not be shared
for name in ["struct foo_s",
"union foo_u *",
"enum foo_e",
"struct foo_s *(*)()",
"void(*)(struct foo_s *)",
"struct foo_s *(*[5])[8]",
]:
assert ffi1.typeof(name) is not ffi2.typeof(name)
# sanity check: twice 'ffi1'
assert ffi1.typeof("struct foo_s*") is ffi1.typeof("struct foo_s *")
def test_module_name_in_package():
ffi = FFI()
ffi.cdef("int foo(int);")
recompiler.recompile(ffi, "test_module_name_in_package.mymod",
"int foo(int x) { return x + 32; }",
tmpdir=str(udir))
old_sys_path = sys.path[:]
try:
package_dir = udir.join('test_module_name_in_package')
for name in os.listdir(str(udir)):
assert not name.startswith('test_module_name_in_package.')
assert os.path.isdir(str(package_dir))
assert len(os.listdir(str(package_dir))) > 0
assert os.path.exists(str(package_dir.join('mymod.c')))
package_dir.join('__init__.py').write('')
#
getattr(importlib, 'invalidate_caches', object)()
#
sys.path.insert(0, str(udir))
import test_module_name_in_package.mymod
assert test_module_name_in_package.mymod.lib.foo(10) == 42
assert test_module_name_in_package.mymod.__name__ == (
'test_module_name_in_package.mymod')
finally:
sys.path[:] = old_sys_path
def test_bad_size_of_global_1():
ffi = FFI()
ffi.cdef("extern short glob;")
py.test.raises(VerificationError, verify, ffi,
"test_bad_size_of_global_1", "long glob;")
def test_bad_size_of_global_2():
ffi = FFI()
ffi.cdef("extern int glob[10];")
py.test.raises(VerificationError, verify, ffi,
"test_bad_size_of_global_2", "int glob[9];")
def test_unspecified_size_of_global_1():
ffi = FFI()
ffi.cdef("extern int glob[];")
lib = verify(ffi, "test_unspecified_size_of_global_1", "int glob[10];")
assert ffi.typeof(lib.glob) == ffi.typeof("int *")
def test_unspecified_size_of_global_2():
ffi = FFI()
ffi.cdef("extern int glob[][5];")
lib = verify(ffi, "test_unspecified_size_of_global_2", "int glob[10][5];")
assert ffi.typeof(lib.glob) == ffi.typeof("int(*)[5]")
def test_unspecified_size_of_global_3():
ffi = FFI()
ffi.cdef("extern int glob[][...];")
lib = verify(ffi, "test_unspecified_size_of_global_3", "int glob[10][5];")
assert ffi.typeof(lib.glob) == ffi.typeof("int(*)[5]")
def test_unspecified_size_of_global_4():
ffi = FFI()
ffi.cdef("extern int glob[...][...];")
lib = verify(ffi, "test_unspecified_size_of_global_4", "int glob[10][5];")
assert ffi.typeof(lib.glob) == ffi.typeof("int[10][5]")
def test_include_1():
ffi1 = FFI()
ffi1.cdef("typedef double foo_t;")
verify(ffi1, "test_include_1_parent", "typedef double foo_t;")
ffi = FFI()
ffi.include(ffi1)
ffi.cdef("foo_t ff1(foo_t);")
lib = verify(ffi, "test_include_1", "double ff1(double x) { return 42.5; }")
assert lib.ff1(0) == 42.5
assert ffi1.typeof("foo_t") is ffi.typeof("foo_t") is ffi.typeof("double")
def test_include_1b():
ffi1 = FFI()
ffi1.cdef("int foo1(int);")
lib1 = verify(ffi1, "test_include_1b_parent",
"int foo1(int x) { return x + 10; }")
ffi = FFI()
ffi.include(ffi1)
ffi.cdef("int foo2(int);")
lib = verify(ffi, "test_include_1b", "int foo2(int x) { return x - 5; }")
assert lib.foo2(42) == 37
assert lib.foo1(42) == 52
assert lib.foo1 is lib1.foo1
def test_include_2():
ffi1 = FFI()
ffi1.cdef("struct foo_s { int x, y; };")
verify(ffi1, "test_include_2_parent", "struct foo_s { int x, y; };")
ffi = FFI()
ffi.include(ffi1)
ffi.cdef("struct foo_s *ff2(struct foo_s *);")
lib = verify(ffi, "test_include_2",
"struct foo_s { int x, y; }; //usually from a #include\n"
"struct foo_s *ff2(struct foo_s *p) { p->y++; return p; }")
p = ffi.new("struct foo_s *")
p.y = 41
q = lib.ff2(p)
assert q == p
assert p.y == 42
assert ffi1.typeof("struct foo_s") is ffi.typeof("struct foo_s")
def test_include_3():
ffi1 = FFI()
ffi1.cdef("typedef short sshort_t;")
verify(ffi1, "test_include_3_parent", "typedef short sshort_t;")
ffi = FFI()
ffi.include(ffi1)
ffi.cdef("sshort_t ff3(sshort_t);")
lib = verify(ffi, "test_include_3",
"typedef short sshort_t; //usually from a #include\n"
"sshort_t ff3(sshort_t x) { return (sshort_t)(x + 42); }")
assert lib.ff3(10) == 52
assert ffi.typeof(ffi.cast("sshort_t", 42)) is ffi.typeof("short")
assert ffi1.typeof("sshort_t") is ffi.typeof("sshort_t")
def test_include_4():
ffi1 = FFI()
ffi1.cdef("typedef struct { int x; } mystruct_t;")
verify(ffi1, "test_include_4_parent",
"typedef struct { int x; } mystruct_t;")
ffi = FFI()
ffi.include(ffi1)
ffi.cdef("mystruct_t *ff4(mystruct_t *);")
lib = verify(ffi, "test_include_4",
"typedef struct {int x; } mystruct_t; //usually from a #include\n"
"mystruct_t *ff4(mystruct_t *p) { p->x += 42; return p; }")
p = ffi.new("mystruct_t *", [10])
q = lib.ff4(p)
assert q == p
assert p.x == 52
assert ffi1.typeof("mystruct_t") is ffi.typeof("mystruct_t")
def test_include_5():
ffi1 = FFI()
ffi1.cdef("typedef struct { int x[2]; int y; } *mystruct_p;")
verify(ffi1, "test_include_5_parent",
"typedef struct { int x[2]; int y; } *mystruct_p;")
ffi = FFI()
ffi.include(ffi1)
ffi.cdef("mystruct_p ff5(mystruct_p);")
lib = verify(ffi, "test_include_5",
"typedef struct {int x[2]; int y; } *mystruct_p; //usually #include\n"
"mystruct_p ff5(mystruct_p p) { p->x[1] += 42; return p; }")
assert ffi.alignof(ffi.typeof("mystruct_p").item) == 4
assert ffi1.typeof("mystruct_p") is ffi.typeof("mystruct_p")
p = ffi.new("mystruct_p", [[5, 10], -17])
q = lib.ff5(p)
assert q == p
assert p.x[0] == 5
assert p.x[1] == 52
assert p.y == -17
assert ffi.alignof(ffi.typeof(p[0])) == 4
def test_include_6():
ffi1 = FFI()
ffi1.cdef("typedef ... mystruct_t;")
verify(ffi1, "test_include_6_parent",
"typedef struct _mystruct_s mystruct_t;")
ffi = FFI()
ffi.include(ffi1)
ffi.cdef("mystruct_t *ff6(void); int ff6b(mystruct_t *);")
lib = verify(ffi, "test_include_6",
"typedef struct _mystruct_s mystruct_t; //usually from a #include\n"
"struct _mystruct_s { int x; };\n"
"static mystruct_t result_struct = { 42 };\n"
"mystruct_t *ff6(void) { return &result_struct; }\n"
"int ff6b(mystruct_t *p) { return p->x; }")
p = lib.ff6()
assert ffi.cast("int *", p)[0] == 42
assert lib.ff6b(p) == 42
def test_include_7():
ffi1 = FFI()
ffi1.cdef("typedef ... mystruct_t;\n"
"int ff7b(mystruct_t *);")
verify(ffi1, "test_include_7_parent",
"typedef struct { int x; } mystruct_t;\n"
"int ff7b(mystruct_t *p) { return p->x; }")
ffi = FFI()
ffi.include(ffi1)
ffi.cdef("mystruct_t *ff7(void);")
lib = verify(ffi, "test_include_7",
"typedef struct { int x; } mystruct_t; //usually from a #include\n"
"static mystruct_t result_struct = { 42 };"
"mystruct_t *ff7(void) { return &result_struct; }")
p = lib.ff7()
assert ffi.cast("int *", p)[0] == 42
assert lib.ff7b(p) == 42
def test_include_8():
ffi1 = FFI()
ffi1.cdef("struct foo_s;")
verify(ffi1, "test_include_8_parent", "struct foo_s;")
ffi = FFI()
ffi.include(ffi1)
ffi.cdef("struct foo_s { int x, y; };")
verify(ffi, "test_include_8", "struct foo_s { int x, y; };")
e = py.test.raises(NotImplementedError, ffi.new, "struct foo_s *")
assert str(e.value) == (
"'struct foo_s' is opaque in the ffi.include(), but no longer in "
"the ffi doing the include (workaround: don't use ffi.include() but"
" duplicate the declarations of everything using struct foo_s)")
def test_unicode_libraries():
try:
unicode
except NameError:
py.test.skip("for python 2.x")
#
import math
lib_m = "m"
if sys.platform == 'win32':
#there is a small chance this fails on Mingw via environ $CC
import distutils.ccompiler
if distutils.ccompiler.get_default_compiler() == 'msvc':
lib_m = 'msvcrt'
ffi = FFI()
ffi.cdef(unicode("float sin(double); double cos(double);"))
lib = verify(ffi, 'test_math_sin_unicode', unicode('#include <math.h>'),
libraries=[unicode(lib_m)], ignore_warnings=True)
assert lib.cos(1.43) == math.cos(1.43)
def test_incomplete_struct_as_arg():
ffi = FFI()
ffi.cdef("struct foo_s { int x; ...; }; int f(int, struct foo_s);")
lib = verify(ffi, "test_incomplete_struct_as_arg",
"struct foo_s { int a, x, z; };\n"
"int f(int b, struct foo_s s) { return s.x * b; }")
s = ffi.new("struct foo_s *", [21])
assert s.x == 21
assert ffi.sizeof(s[0]) == 12
assert ffi.offsetof(ffi.typeof(s), 'x') == 4
assert lib.f(2, s[0]) == 42
assert ffi.typeof(lib.f) == ffi.typeof("int(*)(int, struct foo_s)")
def test_incomplete_struct_as_result():
ffi = FFI()
ffi.cdef("struct foo_s { int x; ...; }; struct foo_s f(int);")
lib = verify(ffi, "test_incomplete_struct_as_result",
"struct foo_s { int a, x, z; };\n"
"struct foo_s f(int x) { struct foo_s r; r.x = x * 2; return r; }")
s = lib.f(21)
assert s.x == 42
assert ffi.typeof(lib.f) == ffi.typeof("struct foo_s(*)(int)")
def test_incomplete_struct_as_both():
ffi = FFI()
ffi.cdef("struct foo_s { int x; ...; }; struct bar_s { int y; ...; };\n"
"struct foo_s f(int, struct bar_s);")
lib = verify(ffi, "test_incomplete_struct_as_both",
"struct foo_s { int a, x, z; };\n"
"struct bar_s { int b, c, y, d; };\n"
"struct foo_s f(int x, struct bar_s b) {\n"
" struct foo_s r; r.x = x * b.y; return r;\n"
"}")
b = ffi.new("struct bar_s *", [7])
s = lib.f(6, b[0])
assert s.x == 42
assert ffi.typeof(lib.f) == ffi.typeof(
"struct foo_s(*)(int, struct bar_s)")
s = lib.f(14, {'y': -3})
assert s.x == -42
def test_name_of_unnamed_struct():
ffi = FFI()
ffi.cdef("typedef struct { int x; } foo_t;\n"
"typedef struct { int y; } *bar_p;\n"
"typedef struct { int y; } **baz_pp;\n")
verify(ffi, "test_name_of_unnamed_struct",
"typedef struct { int x; } foo_t;\n"
"typedef struct { int y; } *bar_p;\n"
"typedef struct { int y; } **baz_pp;\n")
assert repr(ffi.typeof("foo_t")) == "<ctype 'foo_t'>"
assert repr(ffi.typeof("bar_p")) == "<ctype 'struct $1 *'>"
assert repr(ffi.typeof("baz_pp")) == "<ctype 'struct $2 * *'>"
def test_address_of_global_var():
ffi = FFI()
ffi.cdef("""
extern long bottom, bottoms[2];
long FetchRectBottom(void);
long FetchRectBottoms1(void);
#define FOOBAR 42
""")
lib = verify(ffi, "test_address_of_global_var", """
long bottom, bottoms[2];
long FetchRectBottom(void) { return bottom; }
long FetchRectBottoms1(void) { return bottoms[1]; }
#define FOOBAR 42
""")
lib.bottom = 300
assert lib.FetchRectBottom() == 300
lib.bottom += 1
assert lib.FetchRectBottom() == 301
lib.bottoms[1] = 500
assert lib.FetchRectBottoms1() == 500
lib.bottoms[1] += 2
assert lib.FetchRectBottoms1() == 502
#
p = ffi.addressof(lib, 'bottom')
assert ffi.typeof(p) == ffi.typeof("long *")
assert p[0] == 301
p[0] += 1
assert lib.FetchRectBottom() == 302
p = ffi.addressof(lib, 'bottoms')
assert ffi.typeof(p) == ffi.typeof("long(*)[2]")
assert p[0] == lib.bottoms
#
py.test.raises(AttributeError, ffi.addressof, lib, 'unknown_var')
py.test.raises(AttributeError, ffi.addressof, lib, "FOOBAR")
def test_defines__CFFI_():
# Check that we define the macro _CFFI_ automatically.
# It should be done before including Python.h, so that PyPy's Python.h
# can check for it.
ffi = FFI()
ffi.cdef("""
#define CORRECT 1
""")
lib = verify(ffi, "test_defines__CFFI_", """
#ifdef _CFFI_
# define CORRECT 1
#endif
""")
assert lib.CORRECT == 1
def test_unpack_args():
ffi = FFI()
ffi.cdef("void foo0(void); void foo1(int); void foo2(int, int);")
lib = verify(ffi, "test_unpack_args", """
void foo0(void) { }
void foo1(int x) { }
void foo2(int x, int y) { }
""")
assert 'foo0' in repr(lib.foo0)
assert 'foo1' in repr(lib.foo1)
assert 'foo2' in repr(lib.foo2)
lib.foo0()
lib.foo1(42)
lib.foo2(43, 44)
e1 = py.test.raises(TypeError, lib.foo0, 42)
e2 = py.test.raises(TypeError, lib.foo0, 43, 44)
e3 = py.test.raises(TypeError, lib.foo1)
e4 = py.test.raises(TypeError, lib.foo1, 43, 44)
e5 = py.test.raises(TypeError, lib.foo2)
e6 = py.test.raises(TypeError, lib.foo2, 42)
e7 = py.test.raises(TypeError, lib.foo2, 45, 46, 47)
def st1(s):
s = str(s)
if s.startswith("_CFFI_test_unpack_args.CompiledLib."):
s = s[len("_CFFI_test_unpack_args.CompiledLib."):]
return s
assert st1(e1.value) == "foo0() takes no arguments (1 given)"
assert st1(e2.value) == "foo0() takes no arguments (2 given)"
assert st1(e3.value) == "foo1() takes exactly one argument (0 given)"
assert st1(e4.value) == "foo1() takes exactly one argument (2 given)"
assert st1(e5.value) in ["foo2 expected 2 arguments, got 0",
"foo2() takes exactly 2 arguments (0 given)"]
assert st1(e6.value) in ["foo2 expected 2 arguments, got 1",
"foo2() takes exactly 2 arguments (1 given)"]
assert st1(e7.value) in ["foo2 expected 2 arguments, got 3",
"foo2() takes exactly 2 arguments (3 given)"]
def test_address_of_function():
ffi = FFI()
ffi.cdef("long myfunc(long x);")
lib = verify(ffi, "test_addressof_function", """
char myfunc(char x) { return (char)(x + 42); }
""", ignore_warnings=True)
assert lib.myfunc(5) == 47
assert lib.myfunc(0xABC05) == 47
assert not isinstance(lib.myfunc, ffi.CData)
assert ffi.typeof(lib.myfunc) == ffi.typeof("long(*)(long)")
addr = ffi.addressof(lib, 'myfunc')
assert addr(5) == 47
assert addr(0xABC05) == 47
assert isinstance(addr, ffi.CData)
assert ffi.typeof(addr) == ffi.typeof("long(*)(long)")
def test_address_of_function_with_struct():
ffi = FFI()
ffi.cdef("struct foo_s { int x; }; long myfunc(struct foo_s);")
lib = verify(ffi, "test_addressof_function_with_struct", """
struct foo_s { int x; };
char myfunc(struct foo_s input) { return (char)(input.x + 42); }
""")
s = ffi.new("struct foo_s *", [5])[0]
assert lib.myfunc(s) == 47
assert not isinstance(lib.myfunc, ffi.CData)
assert ffi.typeof(lib.myfunc) == ffi.typeof("long(*)(struct foo_s)")
addr = ffi.addressof(lib, 'myfunc')
assert addr(s) == 47
assert isinstance(addr, ffi.CData)
assert ffi.typeof(addr) == ffi.typeof("long(*)(struct foo_s)")
def test_issue198():
ffi = FFI()
ffi.cdef("""
typedef struct{...;} opaque_t;
const opaque_t CONSTANT;
int toint(opaque_t);
""")
lib = verify(ffi, 'test_issue198', """
typedef int opaque_t;
#define CONSTANT ((opaque_t)42)
static int toint(opaque_t o) { return o; }
""")
def random_stuff():
pass
assert lib.toint(lib.CONSTANT) == 42
random_stuff()
assert lib.toint(lib.CONSTANT) == 42
def test_constant_is_not_a_compiler_constant():
ffi = FFI()
ffi.cdef("static const float almost_forty_two;")
lib = verify(ffi, 'test_constant_is_not_a_compiler_constant', """
static float f(void) { return 42.25; }
#define almost_forty_two (f())
""")
assert lib.almost_forty_two == 42.25
def test_constant_of_unknown_size():
ffi = FFI()
ffi.cdef("""
typedef ... opaque_t;
const opaque_t CONSTANT;
""")
lib = verify(ffi, 'test_constant_of_unknown_size',
"typedef int opaque_t;"
"const int CONSTANT = 42;")
e = py.test.raises(ffi.error, getattr, lib, 'CONSTANT')
assert str(e.value) == ("constant 'CONSTANT' is of "
"type 'opaque_t', whose size is not known")
def test_variable_of_unknown_size():
ffi = FFI()
ffi.cdef("""
typedef ... opaque_t;
extern opaque_t globvar;
""")
lib = verify(ffi, 'test_variable_of_unknown_size', """
typedef char opaque_t[6];
opaque_t globvar = "hello";
""")
# can't read or write it at all
e = py.test.raises(TypeError, getattr, lib, 'globvar')
assert str(e.value) in ["cdata 'opaque_t' is opaque",
"'opaque_t' is opaque or not completed yet"] #pypy
e = py.test.raises(TypeError, setattr, lib, 'globvar', [])
assert str(e.value) in ["'opaque_t' is opaque",
"'opaque_t' is opaque or not completed yet"] #pypy
# but we can get its address
p = ffi.addressof(lib, 'globvar')
assert ffi.typeof(p) == ffi.typeof('opaque_t *')
assert ffi.string(ffi.cast("char *", p), 8) == b"hello"
def test_constant_of_value_unknown_to_the_compiler():
extra_c_source = udir.join(
'extra_test_constant_of_value_unknown_to_the_compiler.c')
extra_c_source.write('const int external_foo = 42;\n')
ffi = FFI()
ffi.cdef("const int external_foo;")
lib = verify(ffi, 'test_constant_of_value_unknown_to_the_compiler', """
extern const int external_foo;
""", sources=[str(extra_c_source)])
assert lib.external_foo == 42
def test_dotdot_in_source_file_names():
extra_c_source = udir.join(
'extra_test_dotdot_in_source_file_names.c')
extra_c_source.write('const int external_foo = 42;\n')
ffi = FFI()
ffi.cdef("const int external_foo;")
lib = verify(ffi, 'test_dotdot_in_source_file_names', """
extern const int external_foo;
""", sources=[os.path.join(os.path.dirname(str(extra_c_source)),
'foobar', '..',
os.path.basename(str(extra_c_source)))])
assert lib.external_foo == 42
def test_call_with_incomplete_structs():
ffi = FFI()
ffi.cdef("typedef struct {...;} foo_t; "
"extern foo_t myglob; "
"foo_t increment(foo_t s); "
"double getx(foo_t s);")
lib = verify(ffi, 'test_call_with_incomplete_structs', """
typedef double foo_t;
double myglob = 42.5;
double getx(double x) { return x; }
double increment(double x) { return x + 1; }
""")
assert lib.getx(lib.myglob) == 42.5
assert lib.getx(lib.increment(lib.myglob)) == 43.5
def test_struct_array_guess_length_2():
ffi = FFI()
ffi.cdef("struct foo_s { int a[...][...]; };")
lib = verify(ffi, 'test_struct_array_guess_length_2',
"struct foo_s { int x; int a[5][8]; int y; };")
assert ffi.sizeof('struct foo_s') == 42 * ffi.sizeof('int')
s = ffi.new("struct foo_s *")
assert ffi.typeof(s.a) == ffi.typeof("int[5][8]")
assert ffi.sizeof(s.a) == 40 * ffi.sizeof('int')
assert s.a[4][7] == 0
with pytest.raises(IndexError):
s.a[4][8]
with pytest.raises(IndexError):
s.a[5][0]
assert ffi.typeof(s.a) == ffi.typeof("int[5][8]")
assert ffi.typeof(s.a[0]) == ffi.typeof("int[8]")
def test_struct_array_guess_length_3():
ffi = FFI()
ffi.cdef("struct foo_s { int a[][...]; };")
lib = verify(ffi, 'test_struct_array_guess_length_3',
"struct foo_s { int x; int a[5][7]; int y; };")
assert ffi.sizeof('struct foo_s') == 37 * ffi.sizeof('int')
s = ffi.new("struct foo_s *")
assert ffi.typeof(s.a) == ffi.typeof("int[][7]")
assert s.a[4][6] == 0
with pytest.raises(IndexError):
s.a[4][7]
assert ffi.typeof(s.a[0]) == ffi.typeof("int[7]")
def test_global_var_array_2():
ffi = FFI()
ffi.cdef("extern int a[...][...];")
lib = verify(ffi, 'test_global_var_array_2', 'int a[10][8];')
lib.a[9][7] = 123456
assert lib.a[9][7] == 123456
with pytest.raises(IndexError):
lib.a[0][8]
with pytest.raises(IndexError):
lib.a[10][0]
assert ffi.typeof(lib.a) == ffi.typeof("int[10][8]")
assert ffi.typeof(lib.a[0]) == ffi.typeof("int[8]")
def test_global_var_array_3():
ffi = FFI()
ffi.cdef("extern int a[][...];")
lib = verify(ffi, 'test_global_var_array_3', 'int a[10][8];')
lib.a[9][7] = 123456
assert lib.a[9][7] == 123456
with pytest.raises(IndexError):
lib.a[0][8]
assert ffi.typeof(lib.a) == ffi.typeof("int(*)[8]")
assert ffi.typeof(lib.a[0]) == ffi.typeof("int[8]")
def test_global_var_array_4():
ffi = FFI()
ffi.cdef("extern int a[10][...];")
lib = verify(ffi, 'test_global_var_array_4', 'int a[10][8];')
lib.a[9][7] = 123456
assert lib.a[9][7] == 123456
with pytest.raises(IndexError):
lib.a[0][8]
with pytest.raises(IndexError):
lib.a[10][8]
assert ffi.typeof(lib.a) == ffi.typeof("int[10][8]")
assert ffi.typeof(lib.a[0]) == ffi.typeof("int[8]")
def test_some_integer_type():
ffi = FFI()
ffi.cdef("""
typedef int... foo_t;
typedef unsigned long... bar_t;
typedef struct { foo_t a, b; } mystruct_t;
foo_t foobar(bar_t, mystruct_t);
static const bar_t mu = -20;
static const foo_t nu = 20;
""")
lib = verify(ffi, 'test_some_integer_type', """
typedef unsigned long long foo_t;
typedef short bar_t;
typedef struct { foo_t a, b; } mystruct_t;
static foo_t foobar(bar_t x, mystruct_t s) {
return (foo_t)x + s.a + s.b;
}
static const bar_t mu = -20;
static const foo_t nu = 20;
""")
assert ffi.sizeof("foo_t") == ffi.sizeof("unsigned long long")
assert ffi.sizeof("bar_t") == ffi.sizeof("short")
maxulonglong = 2 ** 64 - 1
assert int(ffi.cast("foo_t", -1)) == maxulonglong
assert int(ffi.cast("bar_t", -1)) == -1
assert lib.foobar(-1, [0, 0]) == maxulonglong
assert lib.foobar(2 ** 15 - 1, [0, 0]) == 2 ** 15 - 1
assert lib.foobar(10, [20, 31]) == 61
assert lib.foobar(0, [0, maxulonglong]) == maxulonglong
py.test.raises(OverflowError, lib.foobar, 2 ** 15, [0, 0])
py.test.raises(OverflowError, lib.foobar, -(2 ** 15) - 1, [0, 0])
py.test.raises(OverflowError, ffi.new, "mystruct_t *", [0, -1])
assert lib.mu == -20
assert lib.nu == 20
def test_some_float_type():
ffi = FFI()
ffi.cdef("""
typedef double... foo_t;
typedef float... bar_t;
foo_t sum(foo_t[]);
bar_t neg(bar_t);
""")
lib = verify(ffi, 'test_some_float_type', """
typedef float foo_t;
static foo_t sum(foo_t x[]) { return x[0] + x[1]; }
typedef double bar_t;
static double neg(double x) { return -x; }
""")
assert lib.sum([40.0, 2.25]) == 42.25
assert lib.sum([12.3, 45.6]) != 12.3 + 45.6 # precision loss
assert lib.neg(12.3) == -12.3 # no precision loss
assert ffi.sizeof("foo_t") == ffi.sizeof("float")
assert ffi.sizeof("bar_t") == ffi.sizeof("double")
def test_some_float_invalid_1():
ffi = FFI()
py.test.raises((FFIError, # with pycparser <= 2.17
CDefError), # with pycparser >= 2.18
ffi.cdef, "typedef long double... foo_t;")
def test_some_float_invalid_2():
ffi = FFI()
ffi.cdef("typedef double... foo_t; foo_t neg(foo_t);")
lib = verify(ffi, 'test_some_float_invalid_2', """
typedef unsigned long foo_t;
foo_t neg(foo_t x) { return -x; }
""")
e = py.test.raises(ffi.error, getattr, lib, 'neg')
assert str(e.value) == ("primitive floating-point type with an unexpected "
"size (or not a float type at all)")
def test_some_float_invalid_3():
ffi = FFI()
ffi.cdef("typedef double... foo_t; foo_t neg(foo_t);")
lib = verify(ffi, 'test_some_float_invalid_3', """
typedef long double foo_t;
foo_t neg(foo_t x) { return -x; }
""", ignore_warnings=True)
if ffi.sizeof("long double") == ffi.sizeof("double"):
assert lib.neg(12.3) == -12.3
else:
e = py.test.raises(ffi.error, getattr, lib, 'neg')
assert str(e.value) == ("primitive floating-point type is "
"'long double', not supported for now with "
"the syntax 'typedef double... xxx;'")
def test_issue200():
ffi = FFI()
ffi.cdef("""
typedef void (function_t)(void*);
void function(void *);
""")
lib = verify(ffi, 'test_issue200', """
static void function(void *p) { (void)p; }
""")
ffi.typeof('function_t*')
lib.function(ffi.NULL)
# assert did not crash
def test_alignment_of_longlong():
ffi = FFI()
x1 = ffi.alignof('unsigned long long')
assert x1 in [4, 8]
ffi.cdef("struct foo_s { unsigned long long x; };")
lib = verify(ffi, 'test_alignment_of_longlong',
"struct foo_s { unsigned long long x; };")
assert ffi.alignof('unsigned long long') == x1
assert ffi.alignof('struct foo_s') == x1
def test_import_from_lib():
ffi = FFI()
ffi.cdef("int mybar(int); static int myvar;\n#define MYFOO ...")
lib = verify(ffi, 'test_import_from_lib',
"#define MYFOO 42\n"
"static int mybar(int x) { return x + 1; }\n"
"static int myvar = -5;")
assert sys.modules['_CFFI_test_import_from_lib'].lib is lib
assert sys.modules['_CFFI_test_import_from_lib.lib'] is lib
from _CFFI_test_import_from_lib.lib import MYFOO
assert MYFOO == 42
assert hasattr(lib, '__dict__')
assert lib.__all__ == ['MYFOO', 'mybar'] # but not 'myvar'
assert lib.__name__ == '_CFFI_test_import_from_lib.lib'
assert lib.__class__ is type(sys) # !! hack for help()
def test_macro_var_callback():
ffi = FFI()
ffi.cdef("extern int my_value; extern int *(*get_my_value)(void);")
lib = verify(ffi, 'test_macro_var_callback',
"int *(*get_my_value)(void);\n"
"#define my_value (*get_my_value())")
#
values = ffi.new("int[50]")
def it():
for i in range(50):
yield i
it = it()
#
@ffi.callback("int *(*)(void)")
def get_my_value():
for nextvalue in it:
return values + nextvalue
lib.get_my_value = get_my_value
#
values[0] = 41
assert lib.my_value == 41 # [0]
p = ffi.addressof(lib, 'my_value') # [1]
assert p == values + 1
assert p[-1] == 41
assert p[+1] == 0
lib.my_value = 42 # [2]
assert values[2] == 42
assert p[-1] == 41
assert p[+1] == 42
#
# if get_my_value raises or returns nonsense, the exception is printed
# to stderr like with any callback, but then the C expression 'my_value'
    # expands to '*NULL'. We assume here that '&my_value' will return NULL
# without segfaulting, and check for NULL when accessing the variable.
@ffi.callback("int *(*)(void)")
def get_my_value():
raise LookupError
lib.get_my_value = get_my_value
py.test.raises(ffi.error, getattr, lib, 'my_value')
py.test.raises(ffi.error, setattr, lib, 'my_value', 50)
py.test.raises(ffi.error, ffi.addressof, lib, 'my_value')
@ffi.callback("int *(*)(void)")
def get_my_value():
return "hello"
lib.get_my_value = get_my_value
py.test.raises(ffi.error, getattr, lib, 'my_value')
e = py.test.raises(ffi.error, setattr, lib, 'my_value', 50)
assert str(e.value) == "global variable 'my_value' is at address NULL"
def test_const_fields():
ffi = FFI()
ffi.cdef("""struct foo_s { const int a; void *const b; };""")
lib = verify(ffi, 'test_const_fields', """
struct foo_s { const int a; void *const b; };""")
foo_s = ffi.typeof("struct foo_s")
assert foo_s.fields[0][0] == 'a'
assert foo_s.fields[0][1].type is ffi.typeof("int")
assert foo_s.fields[1][0] == 'b'
assert foo_s.fields[1][1].type is ffi.typeof("void *")
def test_restrict_fields():
ffi = FFI()
ffi.cdef("""struct foo_s { void * restrict b; };""")
lib = verify(ffi, 'test_restrict_fields', """
struct foo_s { void * __restrict b; };""")
foo_s = ffi.typeof("struct foo_s")
assert foo_s.fields[0][0] == 'b'
assert foo_s.fields[0][1].type is ffi.typeof("void *")
def test_volatile_fields():
ffi = FFI()
ffi.cdef("""struct foo_s { void * volatile b; };""")
lib = verify(ffi, 'test_volatile_fields', """
struct foo_s { void * volatile b; };""")
foo_s = ffi.typeof("struct foo_s")
assert foo_s.fields[0][0] == 'b'
assert foo_s.fields[0][1].type is ffi.typeof("void *")
def test_const_array_fields():
ffi = FFI()
ffi.cdef("""struct foo_s { const int a[4]; };""")
lib = verify(ffi, 'test_const_array_fields', """
struct foo_s { const int a[4]; };""")
foo_s = ffi.typeof("struct foo_s")
assert foo_s.fields[0][0] == 'a'
assert foo_s.fields[0][1].type is ffi.typeof("int[4]")
def test_const_array_fields_varlength():
ffi = FFI()
ffi.cdef("""struct foo_s { const int a[]; ...; };""")
lib = verify(ffi, 'test_const_array_fields_varlength', """
struct foo_s { const int a[4]; };""")
foo_s = ffi.typeof("struct foo_s")
assert foo_s.fields[0][0] == 'a'
assert foo_s.fields[0][1].type is ffi.typeof("int[]")
def test_const_array_fields_unknownlength():
ffi = FFI()
ffi.cdef("""struct foo_s { const int a[...]; ...; };""")
lib = verify(ffi, 'test_const_array_fields_unknownlength', """
struct foo_s { const int a[4]; };""")
foo_s = ffi.typeof("struct foo_s")
assert foo_s.fields[0][0] == 'a'
assert foo_s.fields[0][1].type is ffi.typeof("int[4]")
def test_const_function_args():
ffi = FFI()
ffi.cdef("""int foobar(const int a, const int *b, const int c[]);""")
lib = verify(ffi, 'test_const_function_args', """
int foobar(const int a, const int *b, const int c[]) {
return a + *b + *c;
}
""")
assert lib.foobar(100, ffi.new("int *", 40), ffi.new("int *", 2)) == 142
def test_const_function_type_args():
ffi = FFI()
ffi.cdef("""extern int(*foobar)(const int a,const int*b,const int c[]);""")
lib = verify(ffi, 'test_const_function_type_args', """
int (*foobar)(const int a, const int *b, const int c[]);
""")
t = ffi.typeof(lib.foobar)
assert t.args[0] is ffi.typeof("int")
assert t.args[1] is ffi.typeof("int *")
assert t.args[2] is ffi.typeof("int *")
def test_const_constant():
ffi = FFI()
ffi.cdef("""struct foo_s { int x,y; }; const struct foo_s myfoo;""")
lib = verify(ffi, 'test_const_constant', """
struct foo_s { int x,y; }; const struct foo_s myfoo = { 40, 2 };
""")
assert lib.myfoo.x == 40
assert lib.myfoo.y == 2
def test_const_via_typedef():
ffi = FFI()
ffi.cdef("""typedef const int const_t; const_t aaa;""")
lib = verify(ffi, 'test_const_via_typedef', """
typedef const int const_t;
#define aaa 42
""")
assert lib.aaa == 42
with pytest.raises(AttributeError):
lib.aaa = 43
def test_win32_calling_convention_0():
ffi = FFI()
ffi.cdef("""
int call1(int(__cdecl *cb)(int));
int (*const call2)(int(__stdcall *cb)(int));
""")
lib = verify(ffi, 'test_win32_calling_convention_0', r"""
#ifndef _MSC_VER
# define __stdcall /* nothing */
#endif
int call1(int(*cb)(int)) {
int i, result = 0;
//printf("call1: cb = %p\n", cb);
for (i = 0; i < 1000; i++)
result += cb(i);
//printf("result = %d\n", result);
return result;
}
int call2(int(__stdcall *cb)(int)) {
int i, result = 0;
//printf("call2: cb = %p\n", cb);
for (i = 0; i < 1000; i++)
result += cb(-i);
//printf("result = %d\n", result);
return result;
}
""")
@ffi.callback("int(int)")
def cb1(x):
return x * 2
@ffi.callback("int __stdcall(int)")
def cb2(x):
return x * 3
res = lib.call1(cb1)
assert res == 500*999*2
assert res == ffi.addressof(lib, 'call1')(cb1)
res = lib.call2(cb2)
assert res == -500*999*3
assert res == ffi.addressof(lib, 'call2')(cb2)
if sys.platform == 'win32' and not sys.maxsize > 2**32:
assert '__stdcall' in str(ffi.typeof(cb2))
assert '__stdcall' not in str(ffi.typeof(cb1))
py.test.raises(TypeError, lib.call1, cb2)
py.test.raises(TypeError, lib.call2, cb1)
else:
assert '__stdcall' not in str(ffi.typeof(cb2))
assert ffi.typeof(cb2) is ffi.typeof(cb1)
def test_win32_calling_convention_1():
ffi = FFI()
ffi.cdef("""
int __cdecl call1(int(__cdecl *cb)(int));
int __stdcall call2(int(__stdcall *cb)(int));
int (__cdecl *const cb1)(int);
int (__stdcall *const cb2)(int);
""")
lib = verify(ffi, 'test_win32_calling_convention_1', r"""
#ifndef _MSC_VER
# define __cdecl
# define __stdcall
#endif
int __cdecl cb1(int x) { return x * 2; }
int __stdcall cb2(int x) { return x * 3; }
int __cdecl call1(int(__cdecl *cb)(int)) {
int i, result = 0;
//printf("here1\n");
//printf("cb = %p, cb1 = %p\n", cb, (void *)cb1);
for (i = 0; i < 1000; i++)
result += cb(i);
//printf("result = %d\n", result);
return result;
}
int __stdcall call2(int(__stdcall *cb)(int)) {
int i, result = 0;
//printf("here1\n");
//printf("cb = %p, cb2 = %p\n", cb, (void *)cb2);
for (i = 0; i < 1000; i++)
result += cb(-i);
//printf("result = %d\n", result);
return result;
}
""")
#print '<<< cb1 =', ffi.addressof(lib, 'cb1')
ptr_call1 = ffi.addressof(lib, 'call1')
assert lib.call1(ffi.addressof(lib, 'cb1')) == 500*999*2
assert ptr_call1(ffi.addressof(lib, 'cb1')) == 500*999*2
#print '<<< cb2 =', ffi.addressof(lib, 'cb2')
ptr_call2 = ffi.addressof(lib, 'call2')
assert lib.call2(ffi.addressof(lib, 'cb2')) == -500*999*3
assert ptr_call2(ffi.addressof(lib, 'cb2')) == -500*999*3
#print '<<< done'
def test_win32_calling_convention_2():
    # any mistake in the declaration of a plain function (including the
    # precise argument types and, here, the calling convention) is
# automatically corrected. But this does not apply to the 'cb'
# function pointer argument.
ffi = FFI()
ffi.cdef("""
int __stdcall call1(int(__cdecl *cb)(int));
int __cdecl call2(int(__stdcall *cb)(int));
int (__cdecl *const cb1)(int);
int (__stdcall *const cb2)(int);
""")
lib = verify(ffi, 'test_win32_calling_convention_2', """
#ifndef _MSC_VER
# define __cdecl
# define __stdcall
#endif
int __cdecl call1(int(__cdecl *cb)(int)) {
int i, result = 0;
for (i = 0; i < 1000; i++)
result += cb(i);
return result;
}
int __stdcall call2(int(__stdcall *cb)(int)) {
int i, result = 0;
for (i = 0; i < 1000; i++)
result += cb(-i);
return result;
}
int __cdecl cb1(int x) { return x * 2; }
int __stdcall cb2(int x) { return x * 3; }
""")
ptr_call1 = ffi.addressof(lib, 'call1')
ptr_call2 = ffi.addressof(lib, 'call2')
if sys.platform == 'win32' and not sys.maxsize > 2**32:
py.test.raises(TypeError, lib.call1, ffi.addressof(lib, 'cb2'))
py.test.raises(TypeError, ptr_call1, ffi.addressof(lib, 'cb2'))
py.test.raises(TypeError, lib.call2, ffi.addressof(lib, 'cb1'))
py.test.raises(TypeError, ptr_call2, ffi.addressof(lib, 'cb1'))
assert lib.call1(ffi.addressof(lib, 'cb1')) == 500*999*2
assert ptr_call1(ffi.addressof(lib, 'cb1')) == 500*999*2
assert lib.call2(ffi.addressof(lib, 'cb2')) == -500*999*3
assert ptr_call2(ffi.addressof(lib, 'cb2')) == -500*999*3
def test_win32_calling_convention_3():
ffi = FFI()
ffi.cdef("""
struct point { int x, y; };
int (*const cb1)(struct point);
int (__stdcall *const cb2)(struct point);
struct point __stdcall call1(int(*cb)(struct point));
struct point call2(int(__stdcall *cb)(struct point));
""")
lib = verify(ffi, 'test_win32_calling_convention_3', r"""
#ifndef _MSC_VER
# define __cdecl
# define __stdcall
#endif
struct point { int x, y; };
int cb1(struct point pt) { return pt.x + 10 * pt.y; }
int __stdcall cb2(struct point pt) { return pt.x + 100 * pt.y; }
struct point __stdcall call1(int(__cdecl *cb)(struct point)) {
int i;
struct point result = { 0, 0 };
//printf("here1\n");
//printf("cb = %p, cb1 = %p\n", cb, (void *)cb1);
for (i = 0; i < 1000; i++) {
struct point p = { i, -i };
int r = cb(p);
result.x += r;
result.y -= r;
}
return result;
}
struct point __cdecl call2(int(__stdcall *cb)(struct point)) {
int i;
struct point result = { 0, 0 };
for (i = 0; i < 1000; i++) {
struct point p = { -i, i };
int r = cb(p);
result.x += r;
result.y -= r;
}
return result;
}
""")
ptr_call1 = ffi.addressof(lib, 'call1')
ptr_call2 = ffi.addressof(lib, 'call2')
if sys.platform == 'win32' and not sys.maxsize > 2**32:
py.test.raises(TypeError, lib.call1, ffi.addressof(lib, 'cb2'))
py.test.raises(TypeError, ptr_call1, ffi.addressof(lib, 'cb2'))
py.test.raises(TypeError, lib.call2, ffi.addressof(lib, 'cb1'))
py.test.raises(TypeError, ptr_call2, ffi.addressof(lib, 'cb1'))
pt = lib.call1(ffi.addressof(lib, 'cb1'))
assert (pt.x, pt.y) == (-9*500*999, 9*500*999)
pt = ptr_call1(ffi.addressof(lib, 'cb1'))
assert (pt.x, pt.y) == (-9*500*999, 9*500*999)
pt = lib.call2(ffi.addressof(lib, 'cb2'))
assert (pt.x, pt.y) == (99*500*999, -99*500*999)
pt = ptr_call2(ffi.addressof(lib, 'cb2'))
assert (pt.x, pt.y) == (99*500*999, -99*500*999)
def test_extern_python_1():
import warnings
ffi = FFI()
with warnings.catch_warnings(record=True) as log:
ffi.cdef("""
extern "Python" {
int bar(int, int);
void baz(int, int);
int bok(void);
void boz(void);
}
""")
assert len(log) == 0, "got a warning: %r" % (log,)
lib = verify(ffi, 'test_extern_python_1', """
static void baz(int, int); /* forward */
""")
assert ffi.typeof(lib.bar) == ffi.typeof("int(*)(int, int)")
with FdWriteCapture() as f:
res = lib.bar(4, 5)
assert res == 0
assert f.getvalue() == (
b"extern \"Python\": function _CFFI_test_extern_python_1.bar() called, "
b"but no code was attached "
b"to it yet with @ffi.def_extern(). Returning 0.\n")
@ffi.def_extern("bar")
def my_bar(x, y):
seen.append(("Bar", x, y))
return x * y
assert my_bar != lib.bar
seen = []
res = lib.bar(6, 7)
assert seen == [("Bar", 6, 7)]
assert res == 42
def baz(x, y):
seen.append(("Baz", x, y))
baz1 = ffi.def_extern()(baz)
assert baz1 is baz
seen = []
baz(long(40), long(4))
res = lib.baz(long(50), long(8))
assert res is None
assert seen == [("Baz", 40, 4), ("Baz", 50, 8)]
assert type(seen[0][1]) is type(seen[0][2]) is long
assert type(seen[1][1]) is type(seen[1][2]) is int
@ffi.def_extern(name="bok")
def bokk():
seen.append("Bok")
return 42
seen = []
assert lib.bok() == 42
assert seen == ["Bok"]
@ffi.def_extern()
def boz():
seen.append("Boz")
seen = []
assert lib.boz() is None
assert seen == ["Boz"]
def test_extern_python_bogus_name():
ffi = FFI()
ffi.cdef("extern int abc;")
lib = verify(ffi, 'test_extern_python_bogus_name', "int abc;")
def fn():
pass
py.test.raises(ffi.error, ffi.def_extern("unknown_name"), fn)
py.test.raises(ffi.error, ffi.def_extern("abc"), fn)
assert lib.abc == 0
e = py.test.raises(ffi.error, ffi.def_extern("abc"), fn)
assert str(e.value) == ("ffi.def_extern('abc'): no 'extern \"Python\"' "
"function with this name")
e = py.test.raises(ffi.error, ffi.def_extern(), fn)
assert str(e.value) == ("ffi.def_extern('fn'): no 'extern \"Python\"' "
"function with this name")
#
py.test.raises(TypeError, ffi.def_extern(42), fn)
py.test.raises((TypeError, AttributeError), ffi.def_extern(), "foo")
class X:
pass
x = X()
x.__name__ = x
py.test.raises(TypeError, ffi.def_extern(), x)
def test_extern_python_bogus_result_type():
ffi = FFI()
ffi.cdef("""extern "Python" void bar(int);""")
lib = verify(ffi, 'test_extern_python_bogus_result_type', "")
#
@ffi.def_extern()
def bar(n):
return n * 10
with StdErrCapture() as f:
res = lib.bar(321)
assert res is None
assert f.getvalue() == (
"From cffi callback %r:\n" % (bar,) +
"Trying to convert the result back to C:\n"
"TypeError: callback with the return type 'void' must return None\n")
def test_extern_python_redefine():
ffi = FFI()
ffi.cdef("""extern "Python" int bar(int);""")
lib = verify(ffi, 'test_extern_python_redefine', "")
#
@ffi.def_extern()
def bar(n):
return n * 10
assert lib.bar(42) == 420
#
@ffi.def_extern()
def bar(n):
return -n
assert lib.bar(42) == -42
def test_extern_python_struct():
ffi = FFI()
ffi.cdef("""
struct foo_s { int a, b, c; };
extern "Python" int bar(int, struct foo_s, int);
extern "Python" { struct foo_s baz(int, int);
struct foo_s bok(void); }
""")
lib = verify(ffi, 'test_extern_python_struct',
"struct foo_s { int a, b, c; };")
#
@ffi.def_extern()
def bar(x, s, z):
return x + s.a + s.b + s.c + z
res = lib.bar(1000, [1001, 1002, 1004], 1008)
assert res == 5015
#
@ffi.def_extern()
def baz(x, y):
return [x + y, x - y, x * y]
res = lib.baz(1000, 42)
assert res.a == 1042
assert res.b == 958
assert res.c == 42000
#
@ffi.def_extern()
def bok():
return [10, 20, 30]
res = lib.bok()
assert [res.a, res.b, res.c] == [10, 20, 30]
def test_extern_python_long_double():
ffi = FFI()
ffi.cdef("""
extern "Python" int bar(int, long double, int);
extern "Python" long double baz(int, int);
extern "Python" long double bok(void);
""")
lib = verify(ffi, 'test_extern_python_long_double', "")
#
@ffi.def_extern()
def bar(x, l, z):
seen.append((x, l, z))
return 6
seen = []
lib.bar(10, 3.5, 20)
expected = ffi.cast("long double", 3.5)
assert repr(seen) == repr([(10, expected, 20)])
#
@ffi.def_extern()
def baz(x, z):
assert x == 10 and z == 20
return expected
res = lib.baz(10, 20)
assert repr(res) == repr(expected)
#
@ffi.def_extern()
def bok():
return expected
res = lib.bok()
assert repr(res) == repr(expected)
def test_extern_python_signature():
ffi = FFI()
lib = verify(ffi, 'test_extern_python_signature', "")
py.test.raises(TypeError, ffi.def_extern(425), None)
py.test.raises(TypeError, ffi.def_extern, 'a', 'b', 'c', 'd')
def test_extern_python_errors():
ffi = FFI()
ffi.cdef("""
extern "Python" int bar(int);
""")
lib = verify(ffi, 'test_extern_python_errors', "")
seen = []
def oops(*args):
seen.append(args)
@ffi.def_extern(onerror=oops)
def bar(x):
return x + ""
assert lib.bar(10) == 0
@ffi.def_extern(name="bar", onerror=oops, error=-66)
def bar2(x):
return x + ""
assert lib.bar(10) == -66
assert len(seen) == 2
exc, val, tb = seen[0]
assert exc is TypeError
assert isinstance(val, TypeError)
assert tb.tb_frame.f_code.co_name == "bar"
exc, val, tb = seen[1]
assert exc is TypeError
assert isinstance(val, TypeError)
assert tb.tb_frame.f_code.co_name == "bar2"
#
# a case where 'onerror' is not callable
py.test.raises(TypeError, ffi.def_extern(name='bar', onerror=42),
lambda x: x)
def test_extern_python_stdcall():
ffi = FFI()
ffi.cdef("""
extern "Python" int __stdcall foo(int);
extern "Python" int WINAPI bar(int);
static int (__stdcall * mycb1)(int);
static int indirect_call(int);
""")
lib = verify(ffi, 'test_extern_python_stdcall', """
#ifndef _MSC_VER
# define __stdcall
#endif
static int (__stdcall * mycb1)(int);
static int indirect_call(int x) {
return mycb1(x);
}
""")
#
@ffi.def_extern()
def foo(x):
return x + 42
@ffi.def_extern()
def bar(x):
return x + 43
assert lib.foo(100) == 142
assert lib.bar(100) == 143
lib.mycb1 = lib.foo
assert lib.mycb1(200) == 242
assert lib.indirect_call(300) == 342
def test_extern_python_plus_c():
ffi = FFI()
ffi.cdef("""
extern "Python+C" int foo(int);
extern "C +\tPython" int bar(int);
int call_me(int);
""")
lib = verify(ffi, 'test_extern_python_plus_c', """
int foo(int);
#ifdef __GNUC__
__attribute__((visibility("hidden")))
#endif
int bar(int);
static int call_me(int x) {
return foo(x) - bar(x);
}
""")
#
@ffi.def_extern()
def foo(x):
return x * 42
@ffi.def_extern()
def bar(x):
return x * 63
assert lib.foo(100) == 4200
assert lib.bar(100) == 6300
assert lib.call_me(100) == -2100
def test_introspect_function():
ffi = FFI()
ffi.cdef("float f1(double);")
lib = verify(ffi, 'test_introspect_function', """
float f1(double x) { return (float)x; }
""")
assert dir(lib) == ['f1']
FUNC = ffi.typeof(lib.f1)
assert FUNC.kind == 'function'
assert FUNC.args[0].cname == 'double'
assert FUNC.result.cname == 'float'
assert ffi.typeof(ffi.addressof(lib, 'f1')) is FUNC
def test_introspect_global_var():
ffi = FFI()
ffi.cdef("extern float g1;")
lib = verify(ffi, 'test_introspect_global_var', """
float g1;
""")
assert dir(lib) == ['g1']
FLOATPTR = ffi.typeof(ffi.addressof(lib, 'g1'))
assert FLOATPTR.kind == 'pointer'
assert FLOATPTR.item.cname == 'float'
def test_introspect_global_var_array():
ffi = FFI()
ffi.cdef("extern float g1[100];")
lib = verify(ffi, 'test_introspect_global_var_array', """
float g1[100];
""")
assert dir(lib) == ['g1']
FLOATARRAYPTR = ffi.typeof(ffi.addressof(lib, 'g1'))
assert FLOATARRAYPTR.kind == 'pointer'
assert FLOATARRAYPTR.item.kind == 'array'
assert FLOATARRAYPTR.item.length == 100
assert ffi.typeof(lib.g1) is FLOATARRAYPTR.item
def test_introspect_integer_const():
ffi = FFI()
ffi.cdef("#define FOO 42")
lib = verify(ffi, 'test_introspect_integer_const', """
#define FOO 42
""")
assert dir(lib) == ['FOO']
assert lib.FOO == ffi.integer_const('FOO') == 42
def test_introspect_typedef():
ffi = FFI()
ffi.cdef("typedef int foo_t;")
lib = verify(ffi, 'test_introspect_typedef', """
typedef int foo_t;
""")
assert ffi.list_types() == (['foo_t'], [], [])
assert ffi.typeof('foo_t').kind == 'primitive'
assert ffi.typeof('foo_t').cname == 'int'
def test_introspect_typedef_multiple():
ffi = FFI()
ffi.cdef("typedef signed char a_t, c_t, g_t, b_t;")
lib = verify(ffi, 'test_introspect_typedef_multiple', """
typedef signed char a_t, c_t, g_t, b_t;
""")
assert ffi.list_types() == (['a_t', 'b_t', 'c_t', 'g_t'], [], [])
def test_introspect_struct():
ffi = FFI()
ffi.cdef("struct foo_s { int a; };")
lib = verify(ffi, 'test_introspect_struct', """
struct foo_s { int a; };
""")
assert ffi.list_types() == ([], ['foo_s'], [])
assert ffi.typeof('struct foo_s').kind == 'struct'
assert ffi.typeof('struct foo_s').cname == 'struct foo_s'
def test_introspect_union():
ffi = FFI()
ffi.cdef("union foo_s { int a; };")
lib = verify(ffi, 'test_introspect_union', """
union foo_s { int a; };
""")
assert ffi.list_types() == ([], [], ['foo_s'])
assert ffi.typeof('union foo_s').kind == 'union'
assert ffi.typeof('union foo_s').cname == 'union foo_s'
def test_introspect_struct_and_typedef():
ffi = FFI()
ffi.cdef("typedef struct { int a; } foo_t;")
lib = verify(ffi, 'test_introspect_struct_and_typedef', """
typedef struct { int a; } foo_t;
""")
assert ffi.list_types() == (['foo_t'], [], [])
assert ffi.typeof('foo_t').kind == 'struct'
assert ffi.typeof('foo_t').cname == 'foo_t'
def test_introspect_included_type():
SOURCE = """
typedef signed char schar_t;
struct sint_t { int x; };
"""
ffi1 = FFI()
ffi1.cdef(SOURCE)
ffi2 = FFI()
ffi2.include(ffi1)
verify(ffi1, "test_introspect_included_type_parent", SOURCE)
verify(ffi2, "test_introspect_included_type", SOURCE)
assert ffi1.list_types() == ffi2.list_types() == (
['schar_t'], ['sint_t'], [])
def test_introspect_order():
ffi = FFI()
ffi.cdef("union CFFIaaa { int a; }; typedef struct CFFIccc { int a; } CFFIb;")
ffi.cdef("union CFFIg { int a; }; typedef struct CFFIcc { int a; } CFFIbbb;")
ffi.cdef("union CFFIaa { int a; }; typedef struct CFFIa { int a; } CFFIbb;")
verify(ffi, "test_introspect_order", """
union CFFIaaa { int a; }; typedef struct CFFIccc { int a; } CFFIb;
union CFFIg { int a; }; typedef struct CFFIcc { int a; } CFFIbbb;
union CFFIaa { int a; }; typedef struct CFFIa { int a; } CFFIbb;
""")
assert ffi.list_types() == (['CFFIb', 'CFFIbb', 'CFFIbbb'],
['CFFIa', 'CFFIcc', 'CFFIccc'],
['CFFIaa', 'CFFIaaa', 'CFFIg'])
def test_bool_in_cpp():
# this works when compiled as C, but in cffi < 1.7 it fails as C++
ffi = FFI()
ffi.cdef("bool f(void);")
lib = verify(ffi, "test_bool_in_cpp", "char f(void) { return 2; }")
assert lib.f() is True
def test_bool_in_cpp_2():
ffi = FFI()
ffi.cdef('int add(int a, int b);')
lib = verify(ffi, "test_bool_bug_cpp", '''
typedef bool _Bool; /* there is a Windows header with this line */
int add(int a, int b)
{
return a + b;
}''', source_extension='.cpp')
c = lib.add(2, 3)
assert c == 5
def test_struct_field_opaque():
ffi = FFI()
ffi.cdef("struct a { struct b b; };")
e = py.test.raises(TypeError, verify,
ffi, "test_struct_field_opaque", "?")
assert str(e.value) == ("struct a: field 'a.b' is of an opaque"
" type (not declared in cdef())")
ffi = FFI()
ffi.cdef("struct a { struct b b[2]; };")
e = py.test.raises(TypeError, verify,
ffi, "test_struct_field_opaque", "?")
assert str(e.value) == ("struct a: field 'a.b' is of an opaque"
" type (not declared in cdef())")
ffi = FFI()
ffi.cdef("struct a { struct b b[]; };")
e = py.test.raises(TypeError, verify,
ffi, "test_struct_field_opaque", "?")
assert str(e.value) == ("struct a: field 'a.b' is of an opaque"
" type (not declared in cdef())")
def test_function_arg_opaque():
py.test.skip("can currently declare a function with an opaque struct "
"as argument, but AFAICT it's impossible to call it later")
def test_function_returns_opaque():
ffi = FFI()
ffi.cdef("struct a foo(int);")
e = py.test.raises(TypeError, verify,
ffi, "test_function_returns_opaque", "?")
assert str(e.value) == ("function foo: 'struct a' is used as result type,"
" but is opaque")
def test_function_returns_union():
ffi = FFI()
ffi.cdef("union u1 { int a, b; }; union u1 f1(int);")
lib = verify(ffi, "test_function_returns_union", """
union u1 { int a, b; };
static union u1 f1(int x) { union u1 u; u.b = x; return u; }
""")
assert lib.f1(51).a == 51
def test_function_returns_partial_struct():
ffi = FFI()
ffi.cdef("struct aaa { int a; ...; }; struct aaa f1(int);")
lib = verify(ffi, "test_function_returns_partial_struct", """
struct aaa { int b, a, c; };
static struct aaa f1(int x) { struct aaa s = {0}; s.a = x; return s; }
""")
assert lib.f1(52).a == 52
def test_function_returns_float_complex():
if sys.platform == 'win32':
py.test.skip("MSVC may not support _Complex")
ffi = FFI()
ffi.cdef("float _Complex f1(float a, float b);");
lib = verify(ffi, "test_function_returns_float_complex", """
#include <complex.h>
static float _Complex f1(float a, float b) { return a + I*2.0f*b; }
""", no_cpp=True) # <complex.h> fails on some systems with C++
result = lib.f1(1.25, 5.1)
assert type(result) == complex
assert result.real == 1.25 # exact
assert (result.imag != 2*5.1) and (abs(result.imag - 2*5.1) < 1e-5) # inexact
def test_function_returns_double_complex():
if sys.platform == 'win32':
py.test.skip("MSVC may not support _Complex")
ffi = FFI()
ffi.cdef("double _Complex f1(double a, double b);");
lib = verify(ffi, "test_function_returns_double_complex", """
#include <complex.h>
static double _Complex f1(double a, double b) { return a + I*2.0*b; }
""", no_cpp=True) # <complex.h> fails on some systems with C++
result = lib.f1(1.25, 5.1)
assert type(result) == complex
assert result.real == 1.25 # exact
assert result.imag == 2*5.1 # exact
def test_function_argument_float_complex():
if sys.platform == 'win32':
py.test.skip("MSVC may not support _Complex")
ffi = FFI()
ffi.cdef("float f1(float _Complex x);");
lib = verify(ffi, "test_function_argument_float_complex", """
#include <complex.h>
static float f1(float _Complex x) { return cabsf(x); }
""", no_cpp=True) # <complex.h> fails on some systems with C++
x = complex(12.34, 56.78)
result = lib.f1(x)
assert abs(result - abs(x)) < 1e-5
def test_function_argument_double_complex():
if sys.platform == 'win32':
py.test.skip("MSVC may not support _Complex")
ffi = FFI()
ffi.cdef("double f1(double _Complex);");
lib = verify(ffi, "test_function_argument_double_complex", """
#include <complex.h>
static double f1(double _Complex x) { return cabs(x); }
""", no_cpp=True) # <complex.h> fails on some systems with C++
x = complex(12.34, 56.78)
result = lib.f1(x)
assert abs(result - abs(x)) < 1e-11
def test_typedef_array_dotdotdot():
ffi = FFI()
ffi.cdef("""
typedef int foo_t[...], bar_t[...];
extern int gv[...];
typedef int mat_t[...][...];
typedef int vmat_t[][...];
""")
lib = verify(ffi, "test_typedef_array_dotdotdot", """
typedef int foo_t[50], bar_t[50];
int gv[23];
typedef int mat_t[6][7];
typedef int vmat_t[][8];
""")
assert ffi.sizeof("foo_t") == 50 * ffi.sizeof("int")
assert ffi.sizeof("bar_t") == 50 * ffi.sizeof("int")
assert len(ffi.new("foo_t")) == 50
assert len(ffi.new("bar_t")) == 50
assert ffi.sizeof(lib.gv) == 23 * ffi.sizeof("int")
assert ffi.sizeof("mat_t") == 6 * 7 * ffi.sizeof("int")
assert len(ffi.new("mat_t")) == 6
assert len(ffi.new("mat_t")[3]) == 7
py.test.raises(ffi.error, ffi.sizeof, "vmat_t")
p = ffi.new("vmat_t", 4)
assert ffi.sizeof(p[3]) == 8 * ffi.sizeof("int")
def test_call_with_custom_field_pos():
ffi = FFI()
ffi.cdef("""
struct foo { int x; ...; };
struct foo f(void);
struct foo g(int, ...);
""")
lib = verify(ffi, "test_call_with_custom_field_pos", """
struct foo { int y, x; };
struct foo f(void) {
struct foo s = { 40, 200 };
return s;
}
struct foo g(int a, ...) { return f(); }
""")
assert lib.f().x == 200
e = py.test.raises(NotImplementedError, lib.g, 0)
assert str(e.value) == (
'ctype \'struct foo\' not supported as return value. It is a '
'struct declared with "...;", but the C calling convention may '
'depend on the missing fields; or, it contains anonymous '
'struct/unions. Such structs are only supported '
'as return value if the function is \'API mode\' and non-variadic '
'(i.e. declared inside ffibuilder.cdef()+ffibuilder.set_source() '
'and not taking a final \'...\' argument)')
def test_call_with_nested_anonymous_struct():
if sys.platform == 'win32':
py.test.skip("needs a GCC extension")
ffi = FFI()
ffi.cdef("""
struct foo { int a; union { int b, c; }; };
struct foo f(void);
struct foo g(int, ...);
""")
lib = verify(ffi, "test_call_with_nested_anonymous_struct", """
struct foo { int a; union { int b, c; }; };
struct foo f(void) {
struct foo s;
s.a = 40;
s.b = 200;
return s;
}
struct foo g(int a, ...) { return f(); }
""")
assert lib.f().b == 200
e = py.test.raises(NotImplementedError, lib.g, 0)
assert str(e.value) == (
'ctype \'struct foo\' not supported as return value. It is a '
'struct declared with "...;", but the C calling convention may '
'depend on the missing fields; or, it contains anonymous '
'struct/unions. Such structs are only supported '
'as return value if the function is \'API mode\' and non-variadic '
'(i.e. declared inside ffibuilder.cdef()+ffibuilder.set_source() '
'and not taking a final \'...\' argument)')
def test_call_with_bitfield():
ffi = FFI()
ffi.cdef("""
struct foo { int x:5; };
struct foo f(void);
struct foo g(int, ...);
""")
lib = verify(ffi, "test_call_with_bitfield", """
struct foo { int x:5; };
struct foo f(void) {
struct foo s = { 11 };
return s;
}
struct foo g(int a, ...) { return f(); }
""")
assert lib.f().x == 11
e = py.test.raises(NotImplementedError, lib.g, 0)
assert str(e.value) == (
"ctype 'struct foo' not supported as return value. It is a struct "
"with bit fields, which libffi does not support. Such structs are "
"only supported as return value if the function is 'API mode' and "
"non-variadic (i.e. declared inside ffibuilder.cdef()+ffibuilder."
"set_source() and not taking a final '...' argument)")
def test_call_with_zero_length_field():
if sys.platform == 'win32':
py.test.skip("zero-length field not supported by MSVC")
ffi = FFI()
ffi.cdef("""
struct foo { int a; int x[0]; };
struct foo f(void);
struct foo g(int, ...);
""")
lib = verify(ffi, "test_call_with_zero_length_field", """
struct foo { int a; int x[0]; };
struct foo f(void) {
struct foo s = { 42 };
return s;
}
struct foo g(int a, ...) { return f(); }
""")
assert lib.f().a == 42
e = py.test.raises(NotImplementedError, lib.g, 0)
assert str(e.value) == (
"ctype 'struct foo' not supported as return value. It is a "
"struct with a zero-length array, which libffi does not support."
" Such structs are only supported as return value if the function is "
"'API mode' and non-variadic (i.e. declared inside ffibuilder.cdef()"
"+ffibuilder.set_source() and not taking a final '...' argument)")
def test_call_with_union():
ffi = FFI()
ffi.cdef("""
union foo { int a; char b; };
union foo f(void);
union foo g(int, ...);
""")
lib = verify(ffi, "test_call_with_union", """
union foo { int a; char b; };
union foo f(void) {
union foo s = { 42 };
return s;
}
union foo g(int a, ...) { return f(); }
""")
assert lib.f().a == 42
e = py.test.raises(NotImplementedError, lib.g, 0)
assert str(e.value) == (
"ctype 'union foo' not supported as return value by libffi. "
"Unions are only supported as return value if the function is "
"'API mode' and non-variadic (i.e. declared inside ffibuilder.cdef()"
"+ffibuilder.set_source() and not taking a final '...' argument)")
def test_call_with_packed_struct():
if sys.platform == 'win32':
py.test.skip("needs a GCC extension")
ffi = FFI()
ffi.cdef("""
struct foo { char y; int x; };
struct foo f(void);
struct foo g(int, ...);
""", packed=True)
lib = verify(ffi, "test_call_with_packed_struct", """
struct foo { char y; int x; } __attribute__((packed));
struct foo f(void) {
struct foo s = { 40, 200 };
return s;
}
struct foo g(int a, ...) {
struct foo s = { 41, 201 };
return s;
}
""")
assert ord(lib.f().y) == 40
assert lib.f().x == 200
e = py.test.raises(NotImplementedError, lib.g, 0)
assert str(e.value) == (
"ctype 'struct foo' not supported as return value. It is a "
"'packed' structure, with a different layout than expected by libffi."
" Such structs are only supported as return value if the function is "
"'API mode' and non-variadic (i.e. declared inside ffibuilder.cdef()"
"+ffibuilder.set_source() and not taking a final '...' argument)")
def test_pack_not_supported():
ffi = FFI()
ffi.cdef("""struct foo { char y; int x; };""", pack=2)
py.test.raises(NotImplementedError, verify,
ffi, "test_pack_not_supported", "")
def test_gcc_visibility_hidden():
if sys.platform == 'win32':
py.test.skip("test for gcc/clang")
ffi = FFI()
ffi.cdef("""
int f(int);
""")
lib = verify(ffi, "test_gcc_visibility_hidden", """
int f(int a) { return a + 40; }
""", extra_compile_args=['-fvisibility=hidden'])
assert lib.f(2) == 42
def test_override_default_definition():
ffi = FFI()
ffi.cdef("typedef long int16_t, char16_t;")
lib = verify(ffi, "test_override_default_definition", "")
assert ffi.typeof("int16_t") is ffi.typeof("char16_t") is ffi.typeof("long")
def test_char16_char32_type(no_cpp=False):
if no_cpp is False and sys.platform == "win32":
py.test.skip("aaaaaaa why do modern MSVC compilers still define "
"a very old __cplusplus value")
ffi = FFI()
ffi.cdef("""
char16_t foo_2bytes(char16_t);
char32_t foo_4bytes(char32_t);
""")
lib = verify(ffi, "test_char16_char32_type" + no_cpp * "_nocpp", """
#if !defined(__cplusplus) || (!defined(_LIBCPP_VERSION) && __cplusplus < 201103L)
typedef uint_least16_t char16_t;
typedef uint_least32_t char32_t;
#endif
char16_t foo_2bytes(char16_t a) { return (char16_t)(a + 42); }
char32_t foo_4bytes(char32_t a) { return (char32_t)(a + 42); }
""", no_cpp=no_cpp)
assert lib.foo_2bytes(u+'\u1234') == u+'\u125e'
assert lib.foo_4bytes(u+'\u1234') == u+'\u125e'
assert lib.foo_4bytes(u+'\U00012345') == u+'\U0001236f'
py.test.raises(TypeError, lib.foo_2bytes, u+'\U00012345')
py.test.raises(TypeError, lib.foo_2bytes, 1234)
py.test.raises(TypeError, lib.foo_4bytes, 1234)
def test_char16_char32_plain_c():
test_char16_char32_type(no_cpp=True)
def test_loader_spec():
ffi = FFI()
lib = verify(ffi, "test_loader_spec", "")
if sys.version_info < (3,):
assert not hasattr(lib, '__loader__')
assert not hasattr(lib, '__spec__')
else:
assert lib.__loader__ is None
assert lib.__spec__ is None
def test_realize_struct_error():
ffi = FFI()
ffi.cdef("""typedef ... foo_t; struct foo_s { void (*x)(foo_t); };""")
lib = verify(ffi, "test_realize_struct_error", """
typedef int foo_t; struct foo_s { void (*x)(foo_t); };
""")
py.test.raises(TypeError, ffi.new, "struct foo_s *")
def test_from_buffer_struct():
ffi = FFI()
ffi.cdef("""struct foo_s { int a, b; };""")
lib = verify(ffi, "test_from_buffer_struct_p", """
struct foo_s { int a, b; };
""")
p = ffi.new("struct foo_s *", [-219239, 58974983])
q = ffi.from_buffer("struct foo_s[]", ffi.buffer(p))
assert ffi.typeof(q) == ffi.typeof("struct foo_s[]")
assert len(q) == 1
assert q[0].a == p.a
assert q[0].b == p.b
assert q == p
q = ffi.from_buffer("struct foo_s *", ffi.buffer(p))
assert ffi.typeof(q) == ffi.typeof("struct foo_s *")
assert q.a == p.a
assert q.b == p.b
assert q[0].a == p.a
assert q[0].b == p.b
assert q == p
def test_unnamed_bitfield_1():
ffi = FFI()
ffi.cdef("""struct A { char : 1; };""")
lib = verify(ffi, "test_unnamed_bitfield_1", """
struct A { char : 1; };
""")
p = ffi.new("struct A *")
assert ffi.sizeof(p[0]) == 1
# Note: on gcc, the type name is ignored for anonymous bitfields
# and that's why the result is 1. On MSVC, the result is
# sizeof("char") which is also 1.
def test_unnamed_bitfield_2():
ffi = FFI()
ffi.cdef("""struct A {
short c : 1; short : 1; short d : 1; short : 1; };""")
lib = verify(ffi, "test_unnamed_bitfield_2", """
struct A {
short c : 1; short : 1; short d : 1; short : 1;
};
""")
p = ffi.new("struct A *")
assert ffi.sizeof(p[0]) == ffi.sizeof("short")
def test_unnamed_bitfield_3():
ffi = FFI()
ffi.cdef("""struct A { struct { char : 1; char : 1; } b; };""")
lib = verify(ffi, "test_unnamed_bitfield_3", """
struct A { struct { char : 1; char : 1; } b; };
""")
p = ffi.new("struct A *")
assert ffi.sizeof(p[0]) == 1
# Note: on gcc, the type name is ignored for anonymous bitfields
# and that's why the result is 1. On MSVC, the result is
# sizeof("char") which is also 1.
def test_unnamed_bitfield_4():
ffi = FFI()
ffi.cdef("""struct A { struct {
unsigned c : 1; unsigned : 1; unsigned d : 1; unsigned : 1; } a;
};
struct B { struct A a; };""")
lib = verify(ffi, "test_unnamed_bitfield_4", """
struct A { struct {
unsigned c : 1; unsigned : 1; unsigned d : 1; unsigned : 1; } a;
};
struct B { struct A a; };
""")
b = ffi.new("struct B *")
a = ffi.new("struct A *")
assert ffi.sizeof(a[0]) == ffi.sizeof("unsigned")
assert ffi.sizeof(b[0]) == ffi.sizeof(a[0])
def test_struct_with_func_with_struct_pointer_arg():
ffi = FFI()
ffi.cdef("""struct BinaryTree {
int (* CompareKey)(struct BinaryTree *tree);
};""")
lib = verify(ffi, "test_struct_with_func_with_struct_pointer_arg", """
struct BinaryTree {
int (* CompareKey)(struct BinaryTree *tree);
};
""")
ffi.new("struct BinaryTree *")
def test_struct_with_func_with_struct_arg():
ffi = FFI()
ffi.cdef("""struct BinaryTree {
int (* CompareKey)(struct BinaryTree tree);
};""")
lib = verify(ffi, "test_struct_with_func_with_struct_arg", """
struct BinaryTree {
int (* CompareKey)(struct BinaryTree tree);
};
""")
py.test.raises(RuntimeError, ffi.new, "struct BinaryTree *")
def test_passing_large_list():
ffi = FFI()
ffi.cdef("""void passing_large_list(long[]);""")
lib = verify(ffi, "test_passing_large_list", """
static void passing_large_list(long a[]) { }
""")
arg = list(range(20000000))
lib.passing_large_list(arg)
# assert did not segfault
|
py | b400e80196372ebf1079805b548e3277169416f3 | from django.urls import path
from django.conf.urls.static import static
from django.conf import settings
from . import views
urlpatterns = [
path('index/', views.index, name='index'),
path('user/login/', views.login_user, name='login'),
path('user/submit_login/', views.submit_login, name='submit_login'),
path('user/dashboard/<int:user_id>', views.dashboard, name='dashboard'),
path('user/dashboard/edit/<int:user_id>/', views.user_edit, name='user_edit'),
] |
py | b400e845b78ffb5afd09f04e7e696f84a31ee681 | from django.apps import AppConfig
class MoviesConfig(AppConfig):
name = 'iioy.movies'
def ready(self):
from iioy.movies import signals # noqa
|
py | b400ea0692bc67b3c85a5f672a1f19fef307c095 | # This code extends codebase from followings:
# https://github.com/dstamoulis/single-path-nas
# https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet
#
# This project incorporates material from the project listed above, and it
# is accessible under their original license terms (Apache License 2.0)
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import tensorflow as tf
from absl import flags
from graph.blockargs import BlockArgsDecoder
from graph.gparams import GlobalParams
from graph.jsonnet import Net
from util.io_utils import tf_open_file_in_path
from util.utils import AttrDict
FLAGS = flags.FLAGS
MEAN_RGB = [0.485 * 255, 0.456 * 255, 0.406 * 255]
STDDEV_RGB = [0.229 * 255, 0.224 * 255, 0.225 * 255]
def build_logits_latency_tensordict(images, model_json_path, training, override_params=None, model_dir=None,
ignore_latency=False, log_searchableblock_tensor='min'):
"""A helper function to creates the NAS Supernet and returns predicted logits.
Args:
images: input images tensor.
model_json_path: string, the model args json path
training: boolean, whether the model is constructed for training.
override_params: A dictionary of params for overriding. Fields must exist in
GlobalParams.
model_dir: If not None, block_args are written to model_params.txt in model_dir.
ignore_latency: If true, terms related to latency will be ignored
log_searchableblock_tensor: 'never' : don't log tensordict from model
'min' : log only use_conditions in searchable blocks
'all' : log all tensordict
Returns:
logits: the logits tensor of classes.
latency: the total latency based on the threshold decisions
tensordict_to_write_on_tensorboard: tensors you want to watch on tensorboard.
"""
assert isinstance(images, tf.Tensor)
model_args, global_params = get_model_args_and_gparams(model_json_path, override_params)
if model_dir:
save_model_args(model_args, model_dir)
in_w = FLAGS.input_image_size
with tf.variable_scope('model'):
model = Net(model_args, global_params)
logits = model(images, training=training)
logits = tf.identity(logits, 'logits')
input_shape = (in_w, in_w, 3)
tf.logging.info("built model with trainable params %d and flops %d" %
(model.get_params_considering_bnbias(input_shape),
model.get_flops(input_shape)))
tensordict = {}
log_searchableblock_tensor = log_searchableblock_tensor.lower()
if log_searchableblock_tensor == 'all':
tensordict = model.tensordict_to_write_on_tensorboard()
elif log_searchableblock_tensor == 'min':
tensordict = {}
for name, val in model.tensordict_to_write_on_tensorboard().items():
is_useconds = 'use' in name
if is_useconds:
tensordict[name] = val
else:
assert log_searchableblock_tensor == 'never'
if ignore_latency:
total_latency = tf.zeros((1,))
else:
from graph.latency_estimator import get_constraint_estimator
latency_estimator = get_constraint_estimator(FLAGS.constraint.lower(), FLAGS.constraint_parse_key,
FLAGS.constraint_div_unit)
total_latency, tensordict_latency = latency_estimator.estim_constraint(model, in_w)
tensordict.update(tensordict_latency)
return logits, total_latency, tensordict
def get_model_args_and_gparams(model_json_path, override_params):
"""
Gets model_args from json file.
Supports both tensorflow-style stages_args and more human-readable style.
"""
model_json = json.load(tf_open_file_in_path("", model_json_path, "r"), object_pairs_hook=AttrDict)
model_args = AttrDict(model_json)
decoder = BlockArgsDecoder()
model_args.stages_args = decoder.decode_to_stages_args(model_args.stages_args)
gparams_dict = parse_gparams_from_model_args(model_args)
global_params = GlobalParams(**gparams_dict)
if override_params:
global_params = global_params._replace(**override_params)
tf.logging.info('global_params= %s', global_params)
tf.logging.info('stages_args= %s', model_args.stages_args)
return model_args, global_params
def parse_gparams_from_model_args(model_args):
def update_gparams_if_exist_in_modelargs(gparams_dict, model_args, key):
val = model_args.get(key)
if val:
gparams_dict[key] = val
return gparams_dict
gparams_dict = {}
for key in ['act_fn', 'se_inner_act_fn', 'se_gating_fn']:
gparams_dict = update_gparams_if_exist_in_modelargs(gparams_dict, model_args, key)
return gparams_dict
def save_model_args(model_args, model_dir, filename='scaled_model_args.json'):
f = tf_open_file_in_path(model_dir, filename, 'w')
json.dump(model_args, f, indent=4, ensure_ascii=False)
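# Hedged usage sketch (the json path, model_dir and override key below are
# illustrative assumptions, not part of this module):
#   model_args, gparams = get_model_args_and_gparams(
#       "model_args.json", override_params={'act_fn': 'swish'})
#   save_model_args(model_args, "/tmp/model_dir")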
|
py | b400ea285395e19e2ce664e69f96b246a4209b96 | # from __future__ import unicode_literals
## check python version
import sys
py_version = sys.version_info.major
## Parser for version strings
try:
from packaging.version import parse as VersionParser
except ImportError:
from distutils.version import LooseVersion as VersionParser
import spacy  # needed below; the original fragment used spacy without importing it
spacy_version = spacy.about.__version__
# 'spacy_entity' and 'model' are expected to be defined by the embedding script
# before this snippet runs; disable the NER pipe only on spaCy >= 2 when entity
# recognition was explicitly turned off.
if 'spacy_entity' in locals() and spacy_entity == False and VersionParser(spacy_version) >= VersionParser("2"):
    nlp = spacy.load(model, disable=['ner'])
else:
    nlp = spacy.load(model)
import re
|
py | b400eb4976b1ced694cbb6758fb0348295d41ea0 | """
--- Exercise statement: N°17 - echo invertido
Escribir un programa cliente-servidor de echo reverso, es decir, un servidor que quede atendiendo conexiones
en un puerto pasado por argumento (-p), y un cliente que conecte a dicho servidor.
El cliente deberá leer desde el stdin cadenas de caracteres, las enviará al servidor, y el servidor responderá
con la misma cadena en orden invertido, es decir, algo así:
./cliente -h 127.0.0.1 -p 1234
>> hola
--> aloh
>> que tal?
--> ?lat euq
Hacer uso de fork o multiprocessing para permitirle al servidor atender varios clientes simultaneamente.
Usar socket INET STREAM (TCP).
Leer los parámetros por línea de comandos (usar getopt.getopt):
Cliente:
-h direccion_ip_servidor
-p puerto_servidor
Servidor:
-p puerto_servidor
(atiende en todas las ip's locales (0.0.0.0)
Tag: echo_inv
"""
import socket
import sys
import getopt
import multiprocessing
def invert_char(c_socket, address):
    print(f"\nGot a connection from {str(address)}.")
    while True:
        data = c_socket.recv(1024).decode()
        # Stop on a bare newline or when the client closes the connection
        # (recv returns an empty byte string in that case).
        if data == "\n" or not data:
            break
        reversed_string = data[::-1]
        c_socket.send(reversed_string.encode())
    c_socket.close()
if __name__ == '__main__':
(option, value) = getopt.getopt(sys.argv[1:], "p:")
if len(sys.argv[1:]) <= 1:
print("Usage: -p server_port")
else:
host = ""
port = 0
for (opt, val) in option:
if opt == "-p":
port = val
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.bind((host, int(port)))
serversocket.listen(5)
        # listen(5) sets the backlog of pending connections; each accepted
        # client is handled in its own process.
while True:
clientsocket, addr = serversocket.accept()
client = multiprocessing.Process(target=invert_char, args=(clientsocket, addr))
client.start()
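# Hedged sketch of the client side described in the exercise statement; it is
# not part of the original file, is never invoked here, and the name
# run_client is illustrative only.
def run_client(host, port):
    """Read lines from stdin, send them to the server, print the reversed echo."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client_socket:
        client_socket.connect((host, int(port)))
        for line in sys.stdin:
            if line == "\n":
                client_socket.send(line.encode())  # tells the server to stop
                break
            client_socket.send(line.rstrip("\n").encode())
            print("-->", client_socket.recv(1024).decode())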
|
py | b400ec852384af30b1ada2accbd73825b2adb2a3 | # -*- coding: utf-8 -*-
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""psutil is a cross-platform library for retrieving information on
running processes and system utilization (CPU, memory, disks, network,
sensors) in Python. Supported platforms:
- Linux
- Windows
- macOS
- FreeBSD
- OpenBSD
- NetBSD
- Sun Solaris
- AIX
Works with Python versions from 2.6 to 3.4+.
"""
from __future__ import division
import collections
import contextlib
import datetime
import errno
import functools
import os
import signal
import subprocess
import sys
import threading
import time
try:
import pwd
except ImportError:
pwd = None
from . import _common
from ._common import deprecated_method
from ._common import memoize
from ._common import memoize_when_activated
from ._common import wrap_numbers as _wrap_numbers
from ._compat import long
from ._compat import PY3 as _PY3
from ._common import STATUS_DEAD
from ._common import STATUS_DISK_SLEEP
from ._common import STATUS_IDLE
from ._common import STATUS_LOCKED
from ._common import STATUS_PARKED
from ._common import STATUS_RUNNING
from ._common import STATUS_SLEEPING
from ._common import STATUS_STOPPED
from ._common import STATUS_TRACING_STOP
from ._common import STATUS_WAITING
from ._common import STATUS_WAKING
from ._common import STATUS_ZOMBIE
from ._common import CONN_CLOSE
from ._common import CONN_CLOSE_WAIT
from ._common import CONN_CLOSING
from ._common import CONN_ESTABLISHED
from ._common import CONN_FIN_WAIT1
from ._common import CONN_FIN_WAIT2
from ._common import CONN_LAST_ACK
from ._common import CONN_LISTEN
from ._common import CONN_NONE
from ._common import CONN_SYN_RECV
from ._common import CONN_SYN_SENT
from ._common import CONN_TIME_WAIT
from ._common import NIC_DUPLEX_FULL
from ._common import NIC_DUPLEX_HALF
from ._common import NIC_DUPLEX_UNKNOWN
from ._common import AIX
from ._common import BSD
from ._common import FREEBSD # NOQA
from ._common import LINUX
from ._common import MACOS
from ._common import NETBSD # NOQA
from ._common import OPENBSD # NOQA
from ._common import OSX # deprecated alias
from ._common import POSIX # NOQA
from ._common import SUNOS
from ._common import WINDOWS
if LINUX:
# This is public API and it will be retrieved from _pslinux.py
# via sys.modules.
PROCFS_PATH = "/proc"
from . import _pslinux as _psplatform
from ._pslinux import IOPRIO_CLASS_BE # NOQA
from ._pslinux import IOPRIO_CLASS_IDLE # NOQA
from ._pslinux import IOPRIO_CLASS_NONE # NOQA
from ._pslinux import IOPRIO_CLASS_RT # NOQA
# Linux >= 2.6.36
if _psplatform.HAS_PRLIMIT:
from ._psutil_linux import RLIM_INFINITY # NOQA
from ._psutil_linux import RLIMIT_AS # NOQA
from ._psutil_linux import RLIMIT_CORE # NOQA
from ._psutil_linux import RLIMIT_CPU # NOQA
from ._psutil_linux import RLIMIT_DATA # NOQA
from ._psutil_linux import RLIMIT_FSIZE # NOQA
from ._psutil_linux import RLIMIT_LOCKS # NOQA
from ._psutil_linux import RLIMIT_MEMLOCK # NOQA
from ._psutil_linux import RLIMIT_NOFILE # NOQA
from ._psutil_linux import RLIMIT_NPROC # NOQA
from ._psutil_linux import RLIMIT_RSS # NOQA
from ._psutil_linux import RLIMIT_STACK # NOQA
# Kinda ugly but considerably faster than using hasattr() and
# setattr() against the module object (we are at import time:
# speed matters).
from . import _psutil_linux
try:
RLIMIT_MSGQUEUE = _psutil_linux.RLIMIT_MSGQUEUE
except AttributeError:
pass
try:
RLIMIT_NICE = _psutil_linux.RLIMIT_NICE
except AttributeError:
pass
try:
RLIMIT_RTPRIO = _psutil_linux.RLIMIT_RTPRIO
except AttributeError:
pass
try:
RLIMIT_RTTIME = _psutil_linux.RLIMIT_RTTIME
except AttributeError:
pass
try:
RLIMIT_SIGPENDING = _psutil_linux.RLIMIT_SIGPENDING
except AttributeError:
pass
elif WINDOWS:
from . import _pswindows as _psplatform
from ._psutil_windows import ABOVE_NORMAL_PRIORITY_CLASS # NOQA
from ._psutil_windows import BELOW_NORMAL_PRIORITY_CLASS # NOQA
from ._psutil_windows import HIGH_PRIORITY_CLASS # NOQA
from ._psutil_windows import IDLE_PRIORITY_CLASS # NOQA
from ._psutil_windows import NORMAL_PRIORITY_CLASS # NOQA
from ._psutil_windows import REALTIME_PRIORITY_CLASS # NOQA
from ._pswindows import CONN_DELETE_TCB # NOQA
from ._pswindows import IOPRIO_VERYLOW # NOQA
from ._pswindows import IOPRIO_LOW # NOQA
from ._pswindows import IOPRIO_NORMAL # NOQA
from ._pswindows import IOPRIO_HIGH # NOQA
elif MACOS:
from . import _psosx as _psplatform
elif BSD:
from . import _psbsd as _psplatform
elif SUNOS:
from . import _pssunos as _psplatform
from ._pssunos import CONN_BOUND # NOQA
from ._pssunos import CONN_IDLE # NOQA
# This is public writable API which is read from _pslinux.py and
# _pssunos.py via sys.modules.
PROCFS_PATH = "/proc"
elif AIX:
from . import _psaix as _psplatform
# This is public API and it will be retrieved from _pslinux.py
# via sys.modules.
PROCFS_PATH = "/proc"
else: # pragma: no cover
raise NotImplementedError('platform %s is not supported' % sys.platform)
__all__ = [
# exceptions
"Error", "NoSuchProcess", "ZombieProcess", "AccessDenied",
"TimeoutExpired",
# constants
"version_info", "__version__",
"STATUS_RUNNING", "STATUS_IDLE", "STATUS_SLEEPING", "STATUS_DISK_SLEEP",
"STATUS_STOPPED", "STATUS_TRACING_STOP", "STATUS_ZOMBIE", "STATUS_DEAD",
"STATUS_WAKING", "STATUS_LOCKED", "STATUS_WAITING", "STATUS_LOCKED",
"STATUS_PARKED",
"CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1",
"CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT",
"CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING", "CONN_NONE",
"AF_LINK",
"NIC_DUPLEX_FULL", "NIC_DUPLEX_HALF", "NIC_DUPLEX_UNKNOWN",
"POWER_TIME_UNKNOWN", "POWER_TIME_UNLIMITED",
"BSD", "FREEBSD", "LINUX", "NETBSD", "OPENBSD", "MACOS", "OSX", "POSIX",
"SUNOS", "WINDOWS", "AIX",
# classes
"Process", "Popen",
# functions
"pid_exists", "pids", "process_iter", "wait_procs", # proc
"virtual_memory", "swap_memory", # memory
"cpu_times", "cpu_percent", "cpu_times_percent", "cpu_count", # cpu
"cpu_stats", # "cpu_freq", "getloadavg"
"net_io_counters", "net_connections", "net_if_addrs", # network
"net_if_stats",
"disk_io_counters", "disk_partitions", "disk_usage", # disk
# "sensors_temperatures", "sensors_battery", "sensors_fans" # sensors
"users", "boot_time", # others
]
__all__.extend(_psplatform.__extra__all__)
__author__ = "Giampaolo Rodola'"
__version__ = "5.6.3"
version_info = tuple([int(num) for num in __version__.split('.')])
_timer = getattr(time, 'monotonic', time.time)
AF_LINK = _psplatform.AF_LINK
POWER_TIME_UNLIMITED = _common.POWER_TIME_UNLIMITED
POWER_TIME_UNKNOWN = _common.POWER_TIME_UNKNOWN
_TOTAL_PHYMEM = None
_LOWEST_PID = None
# Sanity check in case the user messed up with psutil installation
# or did something weird with sys.path. In this case we might end
# up importing a python module using a C extension module which
# was compiled for a different version of psutil.
# We want to prevent that by failing sooner rather than later.
# See: https://github.com/giampaolo/psutil/issues/564
if (int(__version__.replace('.', '')) !=
getattr(_psplatform.cext, 'version', None)):
msg = "version conflict: %r C extension module was built for another " \
"version of psutil" % getattr(_psplatform.cext, "__file__")
if hasattr(_psplatform.cext, 'version'):
msg += " (%s instead of %s)" % (
'.'.join([x for x in str(_psplatform.cext.version)]), __version__)
else:
msg += " (different than %s)" % __version__
msg += "; you may try to 'pip uninstall psutil', manually remove %s" % (
getattr(_psplatform.cext, "__file__",
"the existing psutil install directory"))
msg += " or clean the virtual env somehow, then reinstall"
raise ImportError(msg)
# =====================================================================
# --- Exceptions
# =====================================================================
class Error(Exception):
"""Base exception class. All other psutil exceptions inherit
from this one.
"""
def __init__(self, msg=""):
Exception.__init__(self, msg)
self.msg = msg
def __repr__(self):
ret = "psutil.%s %s" % (self.__class__.__name__, self.msg)
return ret.strip()
__str__ = __repr__
class NoSuchProcess(Error):
"""Exception raised when a process with a certain PID doesn't
or no longer exists.
"""
def __init__(self, pid, name=None, msg=None):
Error.__init__(self, msg)
self.pid = pid
self.name = name
self.msg = msg
if msg is None:
if name:
details = "(pid=%s, name=%s)" % (self.pid, repr(self.name))
else:
details = "(pid=%s)" % self.pid
self.msg = "process no longer exists " + details
class ZombieProcess(NoSuchProcess):
"""Exception raised when querying a zombie process. This is
raised on macOS, BSD and Solaris only, and not always: depending
on the query the OS may be able to succeed anyway.
    On Linux all zombie processes are queryable (hence this is never
raised). Windows doesn't have zombie processes.
"""
def __init__(self, pid, name=None, ppid=None, msg=None):
NoSuchProcess.__init__(self, msg)
self.pid = pid
self.ppid = ppid
self.name = name
self.msg = msg
if msg is None:
args = ["pid=%s" % pid]
if name:
args.append("name=%s" % repr(self.name))
if ppid:
args.append("ppid=%s" % self.ppid)
details = "(%s)" % ", ".join(args)
self.msg = "process still exists but it's a zombie " + details
class AccessDenied(Error):
"""Exception raised when permission to perform an action is denied."""
def __init__(self, pid=None, name=None, msg=None):
Error.__init__(self, msg)
self.pid = pid
self.name = name
self.msg = msg
if msg is None:
if (pid is not None) and (name is not None):
self.msg = "(pid=%s, name=%s)" % (pid, repr(name))
elif (pid is not None):
self.msg = "(pid=%s)" % self.pid
else:
self.msg = ""
class TimeoutExpired(Error):
"""Raised on Process.wait(timeout) if timeout expires and process
is still alive.
"""
def __init__(self, seconds, pid=None, name=None):
Error.__init__(self, "timeout after %s seconds" % seconds)
self.seconds = seconds
self.pid = pid
self.name = name
if (pid is not None) and (name is not None):
self.msg += " (pid=%s, name=%s)" % (pid, repr(name))
elif (pid is not None):
self.msg += " (pid=%s)" % self.pid
# Push exception classes into platform specific module namespace.
_psplatform.NoSuchProcess = NoSuchProcess
_psplatform.ZombieProcess = ZombieProcess
_psplatform.AccessDenied = AccessDenied
_psplatform.TimeoutExpired = TimeoutExpired
if POSIX:
from . import _psposix
_psposix.TimeoutExpired = TimeoutExpired
# =====================================================================
# --- Utils
# =====================================================================
if hasattr(_psplatform, 'ppid_map'):
# Faster version (Windows and Linux).
_ppid_map = _psplatform.ppid_map
else:
def _ppid_map():
"""Return a {pid: ppid, ...} dict for all running processes in
one shot. Used to speed up Process.children().
"""
ret = {}
for pid in pids():
try:
ret[pid] = _psplatform.Process(pid).ppid()
except (NoSuchProcess, ZombieProcess):
pass
return ret
def _assert_pid_not_reused(fun):
"""Decorator which raises NoSuchProcess in case a process is no
longer running or its PID has been reused.
"""
@functools.wraps(fun)
def wrapper(self, *args, **kwargs):
if not self.is_running():
raise NoSuchProcess(self.pid, self._name)
return fun(self, *args, **kwargs)
return wrapper
def _pprint_secs(secs):
"""Format seconds in a human readable form."""
now = time.time()
secs_ago = int(now - secs)
if secs_ago < 60 * 60 * 24:
fmt = "%H:%M:%S"
else:
fmt = "%Y-%m-%d %H:%M:%S"
return datetime.datetime.fromtimestamp(secs).strftime(fmt)
# =====================================================================
# --- Process class
# =====================================================================
class Process(object):
"""Represents an OS process with the given PID.
If PID is omitted current process PID (os.getpid()) is used.
Raise NoSuchProcess if PID does not exist.
Note that most of the methods of this class do not make sure
the PID of the process being queried has been reused over time.
That means you might end up retrieving an information referring
to another process in case the original one this instance
refers to is gone in the meantime.
The only exceptions for which process identity is pre-emptively
checked and guaranteed are:
- parent()
- children()
- nice() (set)
- ionice() (set)
- rlimit() (set)
- cpu_affinity (set)
- suspend()
- resume()
- send_signal()
- terminate()
- kill()
To prevent this problem for all other methods you can:
- use is_running() before querying the process
- if you're continuously iterating over a set of Process
instances use process_iter() which pre-emptively checks
process identity for every yielded instance
"""
def __init__(self, pid=None):
self._init(pid)
def _init(self, pid, _ignore_nsp=False):
if pid is None:
pid = os.getpid()
else:
if not _PY3 and not isinstance(pid, (int, long)):
raise TypeError('pid must be an integer (got %r)' % pid)
if pid < 0:
raise ValueError('pid must be a positive integer (got %s)'
% pid)
self._pid = pid
self._name = None
self._exe = None
self._create_time = None
self._gone = False
self._hash = None
self._lock = threading.RLock()
# used for caching on Windows only (on POSIX ppid may change)
self._ppid = None
# platform-specific modules define an _psplatform.Process
# implementation class
self._proc = _psplatform.Process(pid)
self._last_sys_cpu_times = None
self._last_proc_cpu_times = None
# cache creation time for later use in is_running() method
try:
self.create_time()
except AccessDenied:
# We should never get here as AFAIK we're able to get
# process creation time on all platforms even as a
# limited user.
pass
except ZombieProcess:
# Zombies can still be queried by this class (although
# not always) and pids() return them so just go on.
pass
except NoSuchProcess:
if not _ignore_nsp:
msg = 'no process found with pid %s' % pid
raise NoSuchProcess(pid, None, msg)
else:
self._gone = True
        # This pair is supposed to identify a Process instance
        # uniquely over time (the PID alone is not enough as
# it might refer to a process whose PID has been reused).
# This will be used later in __eq__() and is_running().
self._ident = (self.pid, self._create_time)
def __str__(self):
try:
info = collections.OrderedDict()
except AttributeError:
info = {} # Python 2.6
info["pid"] = self.pid
try:
info["name"] = self.name()
if self._create_time:
info['started'] = _pprint_secs(self._create_time)
except ZombieProcess:
info["status"] = "zombie"
except NoSuchProcess:
info["status"] = "terminated"
except AccessDenied:
pass
return "%s.%s(%s)" % (
self.__class__.__module__,
self.__class__.__name__,
", ".join(["%s=%r" % (k, v) for k, v in info.items()]))
__repr__ = __str__
def __eq__(self, other):
# Test for equality with another Process object based
# on PID and creation time.
if not isinstance(other, Process):
return NotImplemented
return self._ident == other._ident
def __ne__(self, other):
return not self == other
def __hash__(self):
if self._hash is None:
self._hash = hash(self._ident)
return self._hash
@property
def pid(self):
"""The process PID."""
return self._pid
# --- utility methods
@contextlib.contextmanager
def oneshot(self):
"""Utility context manager which considerably speeds up the
retrieval of multiple process information at the same time.
Internally different process info (e.g. name, ppid, uids,
gids, ...) may be fetched by using the same routine, but
only one information is returned and the others are discarded.
When using this context manager the internal routine is
executed once (in the example below on name()) and the
other info are cached.
The cache is cleared when exiting the context manager block.
The advice is to use this every time you retrieve more than
one information about the process. If you're lucky, you'll
get a hell of a speedup.
>>> import psutil
>>> p = psutil.Process()
>>> with p.oneshot():
... p.name() # collect multiple info
... p.cpu_times() # return cached value
... p.cpu_percent() # return cached value
... p.create_time() # return cached value
...
>>>
"""
with self._lock:
if hasattr(self, "_cache"):
# NOOP: this covers the use case where the user enters the
# context twice:
#
# >>> with p.oneshot():
# ... with p.oneshot():
# ...
#
# Also, since as_dict() internally uses oneshot()
# I expect that the code below will be a pretty common
# "mistake" that the user will make, so let's guard
# against that:
#
# >>> with p.oneshot():
# ... p.as_dict()
# ...
yield
else:
try:
# cached in case cpu_percent() is used
self.cpu_times.cache_activate(self)
# cached in case memory_percent() is used
self.memory_info.cache_activate(self)
# cached in case parent() is used
self.ppid.cache_activate(self)
# cached in case username() is used
if POSIX:
self.uids.cache_activate(self)
# specific implementation cache
self._proc.oneshot_enter()
yield
finally:
self.cpu_times.cache_deactivate(self)
self.memory_info.cache_deactivate(self)
self.ppid.cache_deactivate(self)
if POSIX:
self.uids.cache_deactivate(self)
self._proc.oneshot_exit()
def as_dict(self, attrs=None, ad_value=None):
"""Utility method returning process information as a
hashable dictionary.
If *attrs* is specified it must be a list of strings
reflecting available Process class' attribute names
(e.g. ['cpu_times', 'name']) else all public (read
only) attributes are assumed.
*ad_value* is the value which gets assigned in case
AccessDenied or ZombieProcess exception is raised when
retrieving that particular process information.
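        Hedged example (output values are illustrative only):
        >>> psutil.Process().as_dict(attrs=['pid', 'name', 'status'])
        {'pid': 1234, 'name': 'python', 'status': 'running'}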
"""
valid_names = _as_dict_attrnames
if attrs is not None:
if not isinstance(attrs, (list, tuple, set, frozenset)):
raise TypeError("invalid attrs type %s" % type(attrs))
attrs = set(attrs)
invalid_names = attrs - valid_names
if invalid_names:
raise ValueError("invalid attr name%s %s" % (
"s" if len(invalid_names) > 1 else "",
", ".join(map(repr, invalid_names))))
retdict = dict()
ls = attrs or valid_names
with self.oneshot():
for name in ls:
try:
if name == 'pid':
ret = self.pid
else:
meth = getattr(self, name)
ret = meth()
except (AccessDenied, ZombieProcess):
ret = ad_value
except NotImplementedError:
# in case of not implemented functionality (may happen
# on old or exotic systems) we want to crash only if
# the user explicitly asked for that particular attr
if attrs:
raise
continue
retdict[name] = ret
return retdict
def parent(self):
"""Return the parent process as a Process object pre-emptively
checking whether PID has been reused.
If no parent is known return None.
"""
lowest_pid = _LOWEST_PID if _LOWEST_PID is not None else pids()[0]
if self.pid == lowest_pid:
return None
ppid = self.ppid()
if ppid is not None:
ctime = self.create_time()
try:
parent = Process(ppid)
if parent.create_time() <= ctime:
return parent
# ...else ppid has been reused by another process
except NoSuchProcess:
pass
def parents(self):
"""Return the parents of this process as a list of Process
instances. If no parents are known return an empty list.
"""
parents = []
proc = self.parent()
while proc is not None:
parents.append(proc)
proc = proc.parent()
return parents
def is_running(self):
"""Return whether this process is running.
It also checks if PID has been reused by another process in
which case return False.
"""
if self._gone:
return False
try:
# Checking if PID is alive is not enough as the PID might
# have been reused by another process: we also want to
# verify process identity.
# Process identity / uniqueness over time is guaranteed by
# (PID + creation time) and that is verified in __eq__.
return self == Process(self.pid)
except ZombieProcess:
# We should never get here as it's already handled in
# Process.__init__; here just for extra safety.
return True
except NoSuchProcess:
self._gone = True
return False
# --- actual API
@memoize_when_activated
def ppid(self):
"""The process parent PID.
On Windows the return value is cached after first call.
"""
# On POSIX we don't want to cache the ppid as it may unexpectedly
# change to 1 (init) in case this process turns into a zombie:
# https://github.com/giampaolo/psutil/issues/321
# http://stackoverflow.com/questions/356722/
# XXX should we check creation time here rather than in
# Process.parent()?
if POSIX:
return self._proc.ppid()
else: # pragma: no cover
self._ppid = self._ppid or self._proc.ppid()
return self._ppid
def name(self):
"""The process name. The return value is cached after first call."""
# Process name is only cached on Windows as on POSIX it may
# change, see:
# https://github.com/giampaolo/psutil/issues/692
if WINDOWS and self._name is not None:
return self._name
name = self._proc.name()
if POSIX and len(name) >= 15:
# On UNIX the name gets truncated to the first 15 characters.
# If it matches the first part of the cmdline we return that
# one instead because it's usually more explicative.
# Examples are "gnome-keyring-d" vs. "gnome-keyring-daemon".
try:
cmdline = self.cmdline()
except AccessDenied:
pass
else:
if cmdline:
extended_name = os.path.basename(cmdline[0])
if extended_name.startswith(name):
name = extended_name
self._name = name
self._proc._name = name
return name
def exe(self):
"""The process executable as an absolute path.
May also be an empty string.
The return value is cached after first call.
"""
def guess_it(fallback):
# try to guess exe from cmdline[0] in absence of a native
# exe representation
cmdline = self.cmdline()
if cmdline and hasattr(os, 'access') and hasattr(os, 'X_OK'):
exe = cmdline[0] # the possible exe
# Attempt to guess only in case of an absolute path.
# It is not safe otherwise as the process might have
# changed cwd.
if (os.path.isabs(exe) and
os.path.isfile(exe) and
os.access(exe, os.X_OK)):
return exe
if isinstance(fallback, AccessDenied):
raise fallback
return fallback
if self._exe is None:
try:
exe = self._proc.exe()
except AccessDenied as err:
return guess_it(fallback=err)
else:
if not exe:
# underlying implementation can legitimately return an
# empty string; if that's the case we don't want to
# raise AD while guessing from the cmdline
try:
exe = guess_it(fallback=exe)
except AccessDenied:
pass
self._exe = exe
return self._exe
def cmdline(self):
"""The command line this process has been called with."""
return self._proc.cmdline()
def status(self):
"""The process current status as a STATUS_* constant."""
try:
return self._proc.status()
except ZombieProcess:
return STATUS_ZOMBIE
def username(self):
"""The name of the user that owns the process.
On UNIX this is calculated by using *real* process uid.
"""
if POSIX:
if pwd is None:
# might happen if python was installed from sources
raise ImportError(
"requires pwd module shipped with standard python")
real_uid = self.uids().real
try:
return pwd.getpwuid(real_uid).pw_name
except KeyError:
# the uid can't be resolved by the system
return str(real_uid)
else:
return self._proc.username()
def create_time(self):
"""The process creation time as a floating point number
expressed in seconds since the epoch, in UTC.
The return value is cached after first call.
"""
if self._create_time is None:
self._create_time = self._proc.create_time()
return self._create_time
def cwd(self):
"""Process current working directory as an absolute path."""
return self._proc.cwd()
def nice(self, value=None):
"""Get or set process niceness (priority)."""
if value is None:
return self._proc.nice_get()
else:
if not self.is_running():
raise NoSuchProcess(self.pid, self._name)
self._proc.nice_set(value)
if POSIX:
@memoize_when_activated
def uids(self):
"""Return process UIDs as a (real, effective, saved)
namedtuple.
"""
return self._proc.uids()
def gids(self):
"""Return process GIDs as a (real, effective, saved)
namedtuple.
"""
return self._proc.gids()
def terminal(self):
"""The terminal associated with this process, if any,
else None.
"""
return self._proc.terminal()
def num_fds(self):
"""Return the number of file descriptors opened by this
process (POSIX only).
"""
return self._proc.num_fds()
# Linux, BSD, AIX and Windows only
if hasattr(_psplatform.Process, "io_counters"):
def io_counters(self):
"""Return process I/O statistics as a
(read_count, write_count, read_bytes, write_bytes)
namedtuple.
Those are the number of read/write calls performed and the
amount of bytes read and written by the process.
"""
return self._proc.io_counters()
# Linux and Windows >= Vista only
if hasattr(_psplatform.Process, "ionice_get"):
def ionice(self, ioclass=None, value=None):
"""Get or set process I/O niceness (priority).
On Linux *ioclass* is one of the IOPRIO_CLASS_* constants.
*value* is a number which goes from 0 to 7. The higher the
value, the lower the I/O priority of the process.
On Windows only *ioclass* is used and it can be set to 2
(normal), 1 (low) or 0 (very low).
Available on Linux and Windows > Vista only.
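            Hedged example (Linux):
            >>> p = psutil.Process()
            >>> p.ionice(psutil.IOPRIO_CLASS_IDLE)  # serve I/O only when the disk is idle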
"""
if ioclass is None:
if value is not None:
raise ValueError("'ioclass' argument must be specified")
return self._proc.ionice_get()
else:
return self._proc.ionice_set(ioclass, value)
# Linux only
if hasattr(_psplatform.Process, "rlimit"):
def rlimit(self, resource, limits=None):
"""Get or set process resource limits as a (soft, hard)
tuple.
*resource* is one of the RLIMIT_* constants.
*limits* is supposed to be a (soft, hard) tuple.
See "man prlimit" for further info.
Available on Linux only.
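            Hedged example: cap open files to 128 (soft) / 256 (hard):
            >>> p = psutil.Process()
            >>> p.rlimit(psutil.RLIMIT_NOFILE, (128, 256))
            >>> soft, hard = p.rlimit(psutil.RLIMIT_NOFILE)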
"""
if limits is None:
return self._proc.rlimit(resource)
else:
return self._proc.rlimit(resource, limits)
# Windows, Linux and FreeBSD only
if hasattr(_psplatform.Process, "cpu_affinity_get"):
def cpu_affinity(self, cpus=None):
"""Get or set process CPU affinity.
If specified, *cpus* must be a list of CPUs for which you
want to set the affinity (e.g. [0, 1]).
            If an empty list is passed, all eligible CPUs are assumed
(and set).
(Windows, Linux and BSD only).
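            Hedged example (values depend on the machine's CPU count):
            >>> p = psutil.Process()
            >>> p.cpu_affinity()        # e.g. [0, 1, 2, 3]
            >>> p.cpu_affinity([0, 1])  # restrict the process to CPUs 0 and 1
            >>> p.cpu_affinity([])      # reset to all eligible CPUs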
"""
if cpus is None:
return list(set(self._proc.cpu_affinity_get()))
else:
if not cpus:
if hasattr(self._proc, "_get_eligible_cpus"):
cpus = self._proc._get_eligible_cpus()
else:
cpus = tuple(range(len(cpu_times(percpu=True))))
self._proc.cpu_affinity_set(list(set(cpus)))
# Linux, FreeBSD, SunOS
if hasattr(_psplatform.Process, "cpu_num"):
def cpu_num(self):
"""Return what CPU this process is currently running on.
The returned number should be <= psutil.cpu_count()
and <= len(psutil.cpu_percent(percpu=True)).
It may be used in conjunction with
psutil.cpu_percent(percpu=True) to observe the system
workload distributed across CPUs.
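            Hedged example (the returned index is illustrative):
            >>> psutil.Process().cpu_num()
            3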
"""
return self._proc.cpu_num()
# Linux, macOS, Windows, Solaris, AIX
if hasattr(_psplatform.Process, "environ"):
def environ(self):
"""The environment variables of the process as a dict. Note: this
might not reflect changes made after the process started. """
return self._proc.environ()
if WINDOWS:
def num_handles(self):
"""Return the number of handles opened by this process
(Windows only).
"""
return self._proc.num_handles()
def num_ctx_switches(self):
"""Return the number of voluntary and involuntary context
switches performed by this process.
"""
return self._proc.num_ctx_switches()
def num_threads(self):
"""Return the number of threads used by this process."""
return self._proc.num_threads()
if hasattr(_psplatform.Process, "threads"):
def threads(self):
"""Return threads opened by process as a list of
(id, user_time, system_time) namedtuples representing
thread id and thread CPU times (user/system).
On OpenBSD this method requires root access.
"""
return self._proc.threads()
@_assert_pid_not_reused
def children(self, recursive=False):
"""Return the children of this process as a list of Process
instances, pre-emptively checking whether PID has been reused.
        If *recursive* is True return all the descendants.
Example (A == this process):
A ─┐
│
├─ B (child) ─┐
│ └─ X (grandchild) ─┐
│ └─ Y (great grandchild)
├─ C (child)
└─ D (child)
>>> import psutil
>>> p = psutil.Process()
>>> p.children()
B, C, D
>>> p.children(recursive=True)
B, X, Y, C, D
Note that in the example above if process X disappears
process Y won't be listed as the reference to process A
is lost.
"""
ppid_map = _ppid_map()
ret = []
if not recursive:
for pid, ppid in ppid_map.items():
if ppid == self.pid:
try:
child = Process(pid)
# if child happens to be older than its parent
# (self) it means child's PID has been reused
if self.create_time() <= child.create_time():
ret.append(child)
except (NoSuchProcess, ZombieProcess):
pass
else:
# Construct a {pid: [child pids]} dict
reverse_ppid_map = collections.defaultdict(list)
for pid, ppid in ppid_map.items():
reverse_ppid_map[ppid].append(pid)
# Recursively traverse that dict, starting from self.pid,
# such that we only call Process() on actual children
seen = set()
stack = [self.pid]
while stack:
pid = stack.pop()
if pid in seen:
# Since pids can be reused while the ppid_map is
# constructed, there may be rare instances where
# there's a cycle in the recorded process "tree".
continue
seen.add(pid)
for child_pid in reverse_ppid_map[pid]:
try:
child = Process(child_pid)
# if child happens to be older than its parent
# (self) it means child's PID has been reused
intime = self.create_time() <= child.create_time()
if intime:
ret.append(child)
stack.append(child_pid)
except (NoSuchProcess, ZombieProcess):
pass
return ret
def cpu_percent(self, interval=None):
"""Return a float representing the current process CPU
utilization as a percentage.
When *interval* is 0.0 or None (default) compares process times
to system CPU times elapsed since last call, returning
immediately (non-blocking). That means that the first time
        this is called it will return a meaningless 0.0 value.
When *interval* is > 0.0 compares process times to system CPU
times elapsed before and after the interval (blocking).
        In this case it is recommended for accuracy that this function
be called with at least 0.1 seconds between calls.
A value > 100.0 can be returned in case of processes running
multiple threads on different CPU cores.
The returned value is explicitly NOT split evenly between
all available logical CPUs. This means that a busy loop process
running on a system with 2 logical CPUs will be reported as
having 100% CPU utilization instead of 50%.
Examples:
>>> import psutil
>>> p = psutil.Process(os.getpid())
>>> # blocking
>>> p.cpu_percent(interval=1)
2.0
>>> # non-blocking (percentage since last call)
>>> p.cpu_percent(interval=None)
2.9
>>>
"""
blocking = interval is not None and interval > 0.0
if interval is not None and interval < 0:
raise ValueError("interval is not positive (got %r)" % interval)
num_cpus = cpu_count() or 1
def timer():
return _timer() * num_cpus
if blocking:
st1 = timer()
pt1 = self._proc.cpu_times()
time.sleep(interval)
st2 = timer()
pt2 = self._proc.cpu_times()
else:
st1 = self._last_sys_cpu_times
pt1 = self._last_proc_cpu_times
st2 = timer()
pt2 = self._proc.cpu_times()
if st1 is None or pt1 is None:
self._last_sys_cpu_times = st2
self._last_proc_cpu_times = pt2
return 0.0
delta_proc = (pt2.user - pt1.user) + (pt2.system - pt1.system)
delta_time = st2 - st1
# reset values for next call in case of interval == None
self._last_sys_cpu_times = st2
self._last_proc_cpu_times = pt2
try:
# This is the utilization split evenly between all CPUs.
# E.g. a busy loop process on a 2-CPU-cores system at this
# point is reported as 50% instead of 100%.
overall_cpus_percent = ((delta_proc / delta_time) * 100)
except ZeroDivisionError:
# interval was too low
return 0.0
else:
# Note 1:
# in order to emulate "top" we multiply the value for the num
# of CPU cores. This way the busy process will be reported as
# having 100% (or more) usage.
#
# Note 2:
# taskmgr.exe on Windows differs in that it will show 50%
# instead.
#
# Note 3:
# a percentage > 100 is legitimate as it can result from a
# process with multiple threads running on different CPU
# cores (top does the same), see:
# http://stackoverflow.com/questions/1032357
# https://github.com/giampaolo/psutil/issues/474
single_cpu_percent = overall_cpus_percent * num_cpus
return round(single_cpu_percent, 1)
@memoize_when_activated
def cpu_times(self):
"""Return a (user, system, children_user, children_system)
namedtuple representing the accumulated process time, in
seconds.
This is similar to os.times() but per-process.
On macOS and Windows children_user and children_system are
always set to 0.
"""
return self._proc.cpu_times()
@memoize_when_activated
def memory_info(self):
"""Return a namedtuple with variable fields depending on the
platform, representing memory information about the process.
        The "portable" fields available on all platforms are `rss` and `vms`.
All numbers are expressed in bytes.
"""
return self._proc.memory_info()
@deprecated_method(replacement="memory_info")
def memory_info_ex(self):
return self.memory_info()
def memory_full_info(self):
"""This method returns the same information as memory_info(),
plus, on some platform (Linux, macOS, Windows), also provides
additional metrics (USS, PSS and swap).
The additional metrics provide a better representation of actual
process memory usage.
Namely USS is the memory which is unique to a process and which
would be freed if the process was terminated right now.
        It does so by passing through the whole process address space.
As such it usually requires higher user privileges than
memory_info() and is considerably slower.
"""
return self._proc.memory_full_info()
def memory_percent(self, memtype="rss"):
"""Compare process memory to total physical system memory and
calculate process memory utilization as a percentage.
*memtype* argument is a string that dictates what type of
process memory you want to compare against (defaults to "rss").
The list of available strings can be obtained like this:
>>> psutil.Process().memory_info()._fields
('rss', 'vms', 'shared', 'text', 'lib', 'data', 'dirty', 'uss', 'pss')
"""
valid_types = list(_psplatform.pfullmem._fields)
if memtype not in valid_types:
raise ValueError("invalid memtype %r; valid types are %r" % (
memtype, tuple(valid_types)))
fun = self.memory_info if memtype in _psplatform.pmem._fields else \
self.memory_full_info
metrics = fun()
value = getattr(metrics, memtype)
# use cached value if available
total_phymem = _TOTAL_PHYMEM or virtual_memory().total
if not total_phymem > 0:
# we should never get here
raise ValueError(
"can't calculate process memory percent because "
"total physical system memory is not positive (%r)"
% total_phymem)
return (value / float(total_phymem)) * 100
if hasattr(_psplatform.Process, "memory_maps"):
def memory_maps(self, grouped=True):
"""Return process' mapped memory regions as a list of namedtuples
whose fields are variable depending on the platform.
If *grouped* is True the mapped regions with the same 'path'
are grouped together and the different memory fields are summed.
If *grouped* is False every mapped region is shown as a single
entity and the namedtuple will also include the mapped region's
address space ('addr') and permission set ('perms').
"""
it = self._proc.memory_maps()
if grouped:
d = {}
for tupl in it:
path = tupl[2]
nums = tupl[3:]
try:
d[path] = map(lambda x, y: x + y, d[path], nums)
except KeyError:
d[path] = nums
nt = _psplatform.pmmap_grouped
return [nt(path, *d[path]) for path in d] # NOQA
else:
nt = _psplatform.pmmap_ext
return [nt(*x) for x in it]
def open_files(self):
"""Return files opened by process as a list of
(path, fd) namedtuples including the absolute file name
and file descriptor number.
"""
return self._proc.open_files()
def connections(self, kind='inet'):
"""Return socket connections opened by process as a list of
(fd, family, type, laddr, raddr, status) namedtuples.
The *kind* parameter filters for connections that match the
following criteria:
+------------+----------------------------------------------------+
| Kind Value | Connections using |
+------------+----------------------------------------------------+
| inet | IPv4 and IPv6 |
| inet4 | IPv4 |
| inet6 | IPv6 |
| tcp | TCP |
| tcp4 | TCP over IPv4 |
| tcp6 | TCP over IPv6 |
| udp | UDP |
| udp4 | UDP over IPv4 |
| udp6 | UDP over IPv6 |
| unix | UNIX socket (both UDP and TCP protocols) |
| all | the sum of all the possible families and protocols |
+------------+----------------------------------------------------+
"""
return self._proc.connections(kind)
# --- signals
if POSIX:
def _send_signal(self, sig):
assert not self.pid < 0, self.pid
if self.pid == 0:
# see "man 2 kill"
raise ValueError(
"preventing sending signal to process with PID 0 as it "
"would affect every process in the process group of the "
"calling process (os.getpid()) instead of PID 0")
try:
os.kill(self.pid, sig)
except OSError as err:
if err.errno == errno.ESRCH:
if OPENBSD and pid_exists(self.pid):
# We do this because os.kill() lies in case of
# zombie processes.
raise ZombieProcess(self.pid, self._name, self._ppid)
else:
self._gone = True
raise NoSuchProcess(self.pid, self._name)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._name)
raise
@_assert_pid_not_reused
def send_signal(self, sig):
"""Send a signal *sig* to process pre-emptively checking
        whether PID has been reused (see signal module constants).
On Windows only SIGTERM is valid and is treated as an alias
for kill().
"""
if POSIX:
self._send_signal(sig)
else: # pragma: no cover
if sig == signal.SIGTERM:
self._proc.kill()
# py >= 2.7
elif sig in (getattr(signal, "CTRL_C_EVENT", object()),
getattr(signal, "CTRL_BREAK_EVENT", object())):
self._proc.send_signal(sig)
else:
raise ValueError(
"only SIGTERM, CTRL_C_EVENT and CTRL_BREAK_EVENT signals "
"are supported on Windows")
@_assert_pid_not_reused
def suspend(self):
"""Suspend process execution with SIGSTOP pre-emptively checking
whether PID has been reused.
        On Windows this has the effect of suspending all process threads.
"""
if POSIX:
self._send_signal(signal.SIGSTOP)
else: # pragma: no cover
self._proc.suspend()
@_assert_pid_not_reused
def resume(self):
"""Resume process execution with SIGCONT pre-emptively checking
whether PID has been reused.
On Windows this has the effect of resuming all process threads.
"""
if POSIX:
self._send_signal(signal.SIGCONT)
else: # pragma: no cover
self._proc.resume()
@_assert_pid_not_reused
def terminate(self):
"""Terminate the process with SIGTERM pre-emptively checking
whether PID has been reused.
On Windows this is an alias for kill().
"""
if POSIX:
self._send_signal(signal.SIGTERM)
else: # pragma: no cover
self._proc.kill()
@_assert_pid_not_reused
def kill(self):
"""Kill the current process with SIGKILL pre-emptively checking
whether PID has been reused.
"""
if POSIX:
self._send_signal(signal.SIGKILL)
else: # pragma: no cover
self._proc.kill()
    def wait(self, timeout=None):
        """Wait for process to terminate and, if process is a child
of os.getpid(), also return its exit code, else None.
If the process is already terminated immediately return None
instead of raising NoSuchProcess.
If *timeout* (in seconds) is specified and process is still
alive raise TimeoutExpired.
To wait for multiple Process(es) use psutil.wait_procs().
"""
if timeout is not None and not timeout >= 0:
raise ValueError("timeout must be a positive integer")
return self._proc.wait(timeout)
# =====================================================================
# --- Popen class
# =====================================================================
class Popen(Process):
"""A more convenient interface to stdlib subprocess.Popen class.
It starts a sub process and deals with it exactly as when using
subprocess.Popen class but in addition also provides all the
properties and methods of psutil.Process class as a unified
interface:
>>> import psutil
>>> from subprocess import PIPE
>>> p = psutil.Popen(["python", "-c", "print 'hi'"], stdout=PIPE)
>>> p.name()
'python'
>>> p.uids()
user(real=1000, effective=1000, saved=1000)
>>> p.username()
'giampaolo'
>>> p.communicate()
('hi\n', None)
>>> p.terminate()
>>> p.wait(timeout=2)
0
>>>
For method names common to both classes such as kill(), terminate()
and wait(), psutil.Process implementation takes precedence.
Unlike subprocess.Popen this class pre-emptively checks whether PID
has been reused on send_signal(), terminate() and kill() so that
you don't accidentally terminate another process, fixing
http://bugs.python.org/issue6973.
For a complete documentation refer to:
http://docs.python.org/3/library/subprocess.html
"""
def __init__(self, *args, **kwargs):
# Explicitly avoid to raise NoSuchProcess in case the process
# spawned by subprocess.Popen terminates too quickly, see:
# https://github.com/giampaolo/psutil/issues/193
self.__subproc = subprocess.Popen(*args, **kwargs)
self._init(self.__subproc.pid, _ignore_nsp=True)
def __dir__(self):
return sorted(set(dir(Popen) + dir(subprocess.Popen)))
def __enter__(self):
if hasattr(self.__subproc, '__enter__'):
self.__subproc.__enter__()
return self
def __exit__(self, *args, **kwargs):
if hasattr(self.__subproc, '__exit__'):
return self.__subproc.__exit__(*args, **kwargs)
else:
if self.stdout:
self.stdout.close()
if self.stderr:
self.stderr.close()
try:
# Flushing a BufferedWriter may raise an error.
if self.stdin:
self.stdin.close()
finally:
# Wait for the process to terminate, to avoid zombies.
self.wait()
def __getattribute__(self, name):
try:
return object.__getattribute__(self, name)
except AttributeError:
try:
return object.__getattribute__(self.__subproc, name)
except AttributeError:
raise AttributeError("%s instance has no attribute '%s'"
% (self.__class__.__name__, name))
def wait(self, timeout=None):
if self.__subproc.returncode is not None:
return self.__subproc.returncode
ret = super(Popen, self).wait(timeout)
self.__subproc.returncode = ret
return ret
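# Illustrative sketch, not part of the psutil API: the __enter__/__exit__ methods
# above make Popen usable as a context manager, which closes the pipes and waits
# for the child on exit. The command line below is only an example.
def _example_popen_context_manager():
    import subprocess
    import psutil
    with psutil.Popen(["python", "-c", "print('hi')"],
                      stdout=subprocess.PIPE) as proc:
        out, _ = proc.communicate(timeout=5)  # child's stdout; stderr not piped
        cpu = proc.cpu_times()                # psutil.Process API works as well
    return out, cpu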
# The valid attr names which can be processed by Process.as_dict().
_as_dict_attrnames = set(
[x for x in dir(Process) if not x.startswith('_') and x not in
['send_signal', 'suspend', 'resume', 'terminate', 'kill', 'wait',
'is_running', 'as_dict', 'parent', 'parents', 'children', 'rlimit',
'memory_info_ex', 'oneshot']])
# =====================================================================
# --- system processes related functions
# =====================================================================
def pids():
"""Return a list of current running PIDs."""
global _LOWEST_PID
ret = sorted(_psplatform.pids())
_LOWEST_PID = ret[0]
return ret
def pid_exists(pid):
"""Return True if given PID exists in the current process list.
This is faster than doing "pid in psutil.pids()" and
should be preferred.
"""
if pid < 0:
return False
elif pid == 0 and POSIX:
# On POSIX we use os.kill() to determine PID existence.
# According to "man 2 kill" PID 0 has a special meaning
# though: it refers to <<every process in the process
        # group of the calling process>> and that is not what we want
# to do here.
return pid in pids()
else:
return _psplatform.pid_exists(pid)
_pmap = {}
_lock = threading.Lock()
def process_iter(attrs=None, ad_value=None):
"""Return a generator yielding a Process instance for all
running processes.
Every new Process instance is only created once and then cached
into an internal table which is updated every time this is used.
Cached Process instances are checked for identity so that you're
safe in case a PID has been reused by another process, in which
case the cached instance is updated.
The sorting order in which processes are yielded is based on
their PIDs.
*attrs* and *ad_value* have the same meaning as in
Process.as_dict(). If *attrs* is specified as_dict() is called
    and the resulting dict is stored as an 'info' attribute attached
    to the returned Process instance.
If *attrs* is an empty list it will retrieve all process info
(slow).
"""
def add(pid):
proc = Process(pid)
if attrs is not None:
proc.info = proc.as_dict(attrs=attrs, ad_value=ad_value)
with _lock:
_pmap[proc.pid] = proc
return proc
def remove(pid):
with _lock:
_pmap.pop(pid, None)
a = set(pids())
b = set(_pmap.keys())
new_pids = a - b
gone_pids = b - a
for pid in gone_pids:
remove(pid)
with _lock:
ls = sorted(list(_pmap.items()) +
list(dict.fromkeys(new_pids).items()))
for pid, proc in ls:
try:
if proc is None: # new process
yield add(pid)
else:
# use is_running() to check whether PID has been reused by
# another process in which case yield a new Process instance
if proc.is_running():
if attrs is not None:
proc.info = proc.as_dict(
attrs=attrs, ad_value=ad_value)
yield proc
else:
yield add(pid)
except NoSuchProcess:
remove(pid)
except AccessDenied:
# Process creation time can't be determined hence there's
# no way to tell whether the pid of the cached process
# has been reused. Just return the cached version.
if proc is None and pid in _pmap:
try:
yield _pmap[pid]
except KeyError:
# If we get here it is likely that 2 threads were
# using process_iter().
pass
else:
raise
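# Minimal usage sketch, not part of psutil: shows how the *attrs* parameter of
# process_iter() pre-populates the 'info' dict on each yielded Process, avoiding
# one call per attribute. The filter on 'python' is only an example value.
def _example_process_iter_usage():
    import psutil
    matches = []
    for proc in psutil.process_iter(attrs=['pid', 'name', 'username'],
                                    ad_value=None):
        # proc.info is a plain dict; ad_value=None stands in for AccessDenied
        if proc.info['name'] == 'python':
            matches.append(proc.info)
    return matches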
def wait_procs(procs, timeout=None, callback=None):
"""Convenience function which waits for a list of processes to
terminate.
Return a (gone, alive) tuple indicating which processes
are gone and which ones are still alive.
The gone ones will have a new *returncode* attribute indicating
process exit status (may be None).
*callback* is a function which gets called every time a process
terminates (a Process instance is passed as callback argument).
Function will return as soon as all processes terminate or when
*timeout* occurs.
Differently from Process.wait() it will not raise TimeoutExpired if
*timeout* occurs.
Typical use case is:
- send SIGTERM to a list of processes
- give them some time to terminate
- send SIGKILL to those ones which are still alive
Example:
>>> def on_terminate(proc):
... print("process {} terminated".format(proc))
...
>>> for p in procs:
... p.terminate()
...
>>> gone, alive = wait_procs(procs, timeout=3, callback=on_terminate)
>>> for p in alive:
... p.kill()
"""
def check_gone(proc, timeout):
try:
returncode = proc.wait(timeout=timeout)
except TimeoutExpired:
pass
else:
if returncode is not None or not proc.is_running():
proc.returncode = returncode
gone.add(proc)
if callback is not None:
callback(proc)
if timeout is not None and not timeout >= 0:
msg = "timeout must be a positive integer, got %s" % timeout
raise ValueError(msg)
gone = set()
alive = set(procs)
if callback is not None and not callable(callback):
raise TypeError("callback %r is not a callable" % callable)
if timeout is not None:
deadline = _timer() + timeout
while alive:
if timeout is not None and timeout <= 0:
break
for proc in alive:
# Make sure that every complete iteration (all processes)
# will last max 1 sec.
# We do this because we don't want to wait too long on a
# single process: in case it terminates too late other
# processes may disappear in the meantime and their PID
# reused.
max_timeout = 1.0 / len(alive)
if timeout is not None:
timeout = min((deadline - _timer()), max_timeout)
if timeout <= 0:
break
check_gone(proc, timeout)
else:
check_gone(proc, max_timeout)
alive = alive - gone
if alive:
# Last attempt over processes survived so far.
# timeout == 0 won't make this function wait any further.
for proc in alive:
check_gone(proc, 0)
alive = alive - gone
return (list(gone), list(alive))
# =====================================================================
# --- CPU related functions
# =====================================================================
def cpu_count(logical=True):
"""Return the number of logical CPUs in the system (same as
os.cpu_count() in Python 3.4).
If *logical* is False return the number of physical cores only
(e.g. hyper thread CPUs are excluded).
Return None if undetermined.
The return value is cached after first call.
    If desired, the cache can be cleared like this:
>>> psutil.cpu_count.cache_clear()
"""
if logical:
ret = _psplatform.cpu_count_logical()
else:
ret = _psplatform.cpu_count_physical()
if ret is not None and ret < 1:
ret = None
return ret
def cpu_times(percpu=False):
"""Return system-wide CPU times as a namedtuple.
Every CPU time represents the seconds the CPU has spent in the
given mode. The namedtuple's fields availability varies depending on the
platform:
- user
- system
- idle
- nice (UNIX)
- iowait (Linux)
- irq (Linux, FreeBSD)
- softirq (Linux)
- steal (Linux >= 2.6.11)
- guest (Linux >= 2.6.24)
- guest_nice (Linux >= 3.2.0)
When *percpu* is True return a list of namedtuples for each CPU.
First element of the list refers to first CPU, second element
to second CPU and so on.
The order of the list is consistent across calls.
"""
if not percpu:
return _psplatform.cpu_times()
else:
return _psplatform.per_cpu_times()
try:
_last_cpu_times = cpu_times()
except Exception:
# Don't want to crash at import time.
_last_cpu_times = None
try:
_last_per_cpu_times = cpu_times(percpu=True)
except Exception:
# Don't want to crash at import time.
_last_per_cpu_times = None
def _cpu_tot_time(times):
"""Given a cpu_time() ntuple calculates the total CPU time
(including idle time).
"""
tot = sum(times)
if LINUX:
# On Linux guest times are already accounted in "user" or
# "nice" times, so we subtract them from total.
# Htop does the same. References:
# https://github.com/giampaolo/psutil/pull/940
# http://unix.stackexchange.com/questions/178045
# https://github.com/torvalds/linux/blob/
# 447976ef4fd09b1be88b316d1a81553f1aa7cd07/kernel/sched/
# cputime.c#L158
tot -= getattr(times, "guest", 0) # Linux 2.6.24+
tot -= getattr(times, "guest_nice", 0) # Linux 3.2.0+
return tot
def _cpu_busy_time(times):
"""Given a cpu_time() ntuple calculates the busy CPU time.
We do so by subtracting all idle CPU times.
"""
busy = _cpu_tot_time(times)
busy -= times.idle
# Linux: "iowait" is time during which the CPU does not do anything
# (waits for IO to complete). On Linux IO wait is *not* accounted
# in "idle" time so we subtract it. Htop does the same.
# References:
# https://github.com/torvalds/linux/blob/
# 447976ef4fd09b1be88b316d1a81553f1aa7cd07/kernel/sched/cputime.c#L244
busy -= getattr(times, "iowait", 0)
return busy
def _cpu_times_deltas(t1, t2):
assert t1._fields == t2._fields, (t1, t2)
field_deltas = []
for field in _psplatform.scputimes._fields:
field_delta = getattr(t2, field) - getattr(t1, field)
# CPU times are always supposed to increase over time
# or at least remain the same and that's because time
# cannot go backwards.
# Surprisingly sometimes this might not be the case (at
# least on Windows and Linux), see:
# https://github.com/giampaolo/psutil/issues/392
# https://github.com/giampaolo/psutil/issues/645
# https://github.com/giampaolo/psutil/issues/1210
# Trim negative deltas to zero to ignore decreasing fields.
# top does the same. Reference:
# https://gitlab.com/procps-ng/procps/blob/v3.3.12/top/top.c#L5063
field_delta = max(0, field_delta)
field_deltas.append(field_delta)
return _psplatform.scputimes(*field_deltas)
def cpu_percent(interval=None, percpu=False):
"""Return a float representing the current system-wide CPU
utilization as a percentage.
When *interval* is > 0.0 compares system CPU times elapsed before
and after the interval (blocking).
When *interval* is 0.0 or None compares system CPU times elapsed
since last call or module import, returning immediately (non
blocking). That means the first time this is called it will
return a meaningless 0.0 value which you should ignore.
    In this case it is recommended for accuracy that this function be
called with at least 0.1 seconds between calls.
When *percpu* is True returns a list of floats representing the
utilization as a percentage for each CPU.
First element of the list refers to first CPU, second element
to second CPU and so on.
The order of the list is consistent across calls.
Examples:
>>> # blocking, system-wide
>>> psutil.cpu_percent(interval=1)
2.0
>>>
>>> # blocking, per-cpu
>>> psutil.cpu_percent(interval=1, percpu=True)
[2.0, 1.0]
>>>
>>> # non-blocking (percentage since last call)
>>> psutil.cpu_percent(interval=None)
2.9
>>>
"""
global _last_cpu_times
global _last_per_cpu_times
blocking = interval is not None and interval > 0.0
if interval is not None and interval < 0:
raise ValueError("interval is not positive (got %r)" % interval)
def calculate(t1, t2):
times_delta = _cpu_times_deltas(t1, t2)
all_delta = _cpu_tot_time(times_delta)
busy_delta = _cpu_busy_time(times_delta)
try:
busy_perc = (busy_delta / all_delta) * 100
except ZeroDivisionError:
return 0.0
else:
return round(busy_perc, 1)
# system-wide usage
if not percpu:
if blocking:
t1 = cpu_times()
time.sleep(interval)
else:
t1 = _last_cpu_times
if t1 is None:
# Something bad happened at import time. We'll
# get a meaningful result on the next call. See:
# https://github.com/giampaolo/psutil/pull/715
t1 = cpu_times()
_last_cpu_times = cpu_times()
return calculate(t1, _last_cpu_times)
# per-cpu usage
else:
ret = []
if blocking:
tot1 = cpu_times(percpu=True)
time.sleep(interval)
else:
tot1 = _last_per_cpu_times
if tot1 is None:
# Something bad happened at import time. We'll
# get a meaningful result on the next call. See:
# https://github.com/giampaolo/psutil/pull/715
tot1 = cpu_times(percpu=True)
_last_per_cpu_times = cpu_times(percpu=True)
for t1, t2 in zip(tot1, _last_per_cpu_times):
ret.append(calculate(t1, t2))
return ret
# Use separate global vars for cpu_times_percent() so that it's
# independent from cpu_percent() and they can both be used within
# the same program.
_last_cpu_times_2 = _last_cpu_times
_last_per_cpu_times_2 = _last_per_cpu_times
def cpu_times_percent(interval=None, percpu=False):
"""Same as cpu_percent() but provides utilization percentages
for each specific CPU time as is returned by cpu_times().
For instance, on Linux we'll get:
>>> cpu_times_percent()
cpupercent(user=4.8, nice=0.0, system=4.8, idle=90.5, iowait=0.0,
irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0)
>>>
*interval* and *percpu* arguments have the same meaning as in
cpu_percent().
"""
global _last_cpu_times_2
global _last_per_cpu_times_2
blocking = interval is not None and interval > 0.0
if interval is not None and interval < 0:
raise ValueError("interval is not positive (got %r)" % interval)
def calculate(t1, t2):
nums = []
times_delta = _cpu_times_deltas(t1, t2)
all_delta = _cpu_tot_time(times_delta)
# "scale" is the value to multiply each delta with to get percentages.
# We use "max" to avoid division by zero (if all_delta is 0, then all
# fields are 0 so percentages will be 0 too. all_delta cannot be a
# fraction because cpu times are integers)
scale = 100.0 / max(1, all_delta)
for field_delta in times_delta:
field_perc = field_delta * scale
field_perc = round(field_perc, 1)
# make sure we don't return negative values or values over 100%
field_perc = min(max(0.0, field_perc), 100.0)
nums.append(field_perc)
return _psplatform.scputimes(*nums)
# system-wide usage
if not percpu:
if blocking:
t1 = cpu_times()
time.sleep(interval)
else:
t1 = _last_cpu_times_2
if t1 is None:
# Something bad happened at import time. We'll
# get a meaningful result on the next call. See:
# https://github.com/giampaolo/psutil/pull/715
t1 = cpu_times()
_last_cpu_times_2 = cpu_times()
return calculate(t1, _last_cpu_times_2)
# per-cpu usage
else:
ret = []
if blocking:
tot1 = cpu_times(percpu=True)
time.sleep(interval)
else:
tot1 = _last_per_cpu_times_2
if tot1 is None:
# Something bad happened at import time. We'll
# get a meaningful result on the next call. See:
# https://github.com/giampaolo/psutil/pull/715
tot1 = cpu_times(percpu=True)
_last_per_cpu_times_2 = cpu_times(percpu=True)
for t1, t2 in zip(tot1, _last_per_cpu_times_2):
ret.append(calculate(t1, t2))
return ret
def cpu_stats():
"""Return CPU statistics."""
return _psplatform.cpu_stats()
if hasattr(_psplatform, "cpu_freq"):
    def cpu_freq(percpu=False):
        """Return CPU frequency as a namedtuple including current,
        min and max frequency expressed in MHz.
If *percpu* is True and the system supports per-cpu frequency
retrieval (Linux only) a list of frequencies is returned for
each CPU. If not a list with one element is returned.
"""
ret = _psplatform.cpu_freq()
if percpu:
return ret
else:
num_cpus = float(len(ret))
if num_cpus == 0:
return None
elif num_cpus == 1:
return ret[0]
else:
currs, mins, maxs = 0.0, 0.0, 0.0
set_none = False
for cpu in ret:
currs += cpu.current
# On Linux if /proc/cpuinfo is used min/max are set
# to None.
if LINUX and cpu.min is None:
set_none = True
continue
mins += cpu.min
maxs += cpu.max
current = currs / num_cpus
if set_none:
min_ = max_ = None
else:
min_ = mins / num_cpus
max_ = maxs / num_cpus
return _common.scpufreq(current, min_, max_)
__all__.append("cpu_freq")
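# Brief usage sketch, not part of psutil: illustrates the aggregated and the
# per-CPU forms of cpu_freq() documented above. Values are in MHz; min/max may
# be None and the function itself is absent on unsupported platforms.
def _example_cpu_freq_usage():
    import psutil
    if not hasattr(psutil, "cpu_freq"):      # not available everywhere
        return None
    overall = psutil.cpu_freq()              # scpufreq(current, min, max)
    per_cpu = psutil.cpu_freq(percpu=True)   # list, one entry per CPU on Linux
    if overall is not None:
        print("current=%.0f MHz (min=%s, max=%s)"
              % (overall.current, overall.min, overall.max))
    return per_cpu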
if hasattr(os, "getloadavg") or hasattr(_psplatform, "getloadavg"):
# Perform this hasattr check once on import time to either use the
# platform based code or proxy straight from the os module.
if hasattr(os, "getloadavg"):
getloadavg = os.getloadavg
else:
getloadavg = _psplatform.getloadavg
__all__.append("getloadavg")
# =====================================================================
# --- system memory related functions
# =====================================================================
def virtual_memory():
"""Return statistics about system memory usage as a namedtuple
including the following fields, expressed in bytes:
- total:
total physical memory available.
- available:
the memory that can be given instantly to processes without the
system going into swap.
This is calculated by summing different memory values depending
on the platform and it is supposed to be used to monitor actual
memory usage in a cross platform fashion.
- percent:
the percentage usage calculated as (total - available) / total * 100
- used:
memory used, calculated differently depending on the platform and
designed for informational purposes only:
macOS: active + wired
BSD: active + wired + cached
Linux: total - free
- free:
memory not being used at all (zeroed) that is readily available;
note that this doesn't reflect the actual memory available
(use 'available' instead)
Platform-specific fields:
- active (UNIX):
memory currently in use or very recently used, and so it is in RAM.
- inactive (UNIX):
memory that is marked as not used.
- buffers (BSD, Linux):
cache for things like file system metadata.
- cached (BSD, macOS):
cache for various things.
- wired (macOS, BSD):
memory that is marked to always stay in RAM. It is never moved to disk.
- shared (BSD):
memory that may be simultaneously accessed by multiple processes.
The sum of 'used' and 'available' does not necessarily equal total.
On Windows 'available' and 'free' are the same.
"""
global _TOTAL_PHYMEM
ret = _psplatform.virtual_memory()
# cached for later use in Process.memory_percent()
_TOTAL_PHYMEM = ret.total
return ret
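# Short sketch, not part of psutil: shows the intended reading of the fields
# documented above; 'available' (not 'free') is the number to monitor, and
# 'percent' is already (total - available) / total * 100. The 5% threshold is
# an arbitrary example value.
def _example_virtual_memory_usage():
    import psutil
    mem = psutil.virtual_memory()
    low_memory = mem.available < 0.05 * mem.total  # under ~5% headroom left
    return mem.percent, low_memory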
def swap_memory():
"""Return system swap memory statistics as a namedtuple including
the following fields:
- total: total swap memory in bytes
- used: used swap memory in bytes
- free: free swap memory in bytes
- percent: the percentage usage
- sin: no. of bytes the system has swapped in from disk (cumulative)
- sout: no. of bytes the system has swapped out from disk (cumulative)
'sin' and 'sout' on Windows are meaningless and always set to 0.
"""
return _psplatform.swap_memory()
# =====================================================================
# --- disks/partitions related functions
# =====================================================================
def disk_usage(path):
"""Return disk usage statistics about the given *path* as a
namedtuple including total, used and free space expressed in bytes
plus the percentage usage.
"""
return _psplatform.disk_usage(path)
def disk_partitions(all=False):
"""Return mounted partitions as a list of
(device, mountpoint, fstype, opts) namedtuple.
'opts' field is a raw string separated by commas indicating mount
options which may vary depending on the platform.
If *all* parameter is False return physical devices only and ignore
all others.
"""
return _psplatform.disk_partitions(all)
def disk_io_counters(perdisk=False, nowrap=True):
"""Return system disk I/O statistics as a namedtuple including
the following fields:
- read_count: number of reads
- write_count: number of writes
- read_bytes: number of bytes read
- write_bytes: number of bytes written
- read_time: time spent reading from disk (in ms)
- write_time: time spent writing to disk (in ms)
Platform specific:
- busy_time: (Linux, FreeBSD) time spent doing actual I/Os (in ms)
- read_merged_count (Linux): number of merged reads
- write_merged_count (Linux): number of merged writes
If *perdisk* is True return the same information for every
physical disk installed on the system as a dictionary
with partition names as the keys and the namedtuple
described above as the values.
    If *nowrap* is True it detects and adjusts the numbers which overflow
    and wrap (restart from 0) and adds "old value" to "new value" so that
the returned numbers will always be increasing or remain the same,
but never decrease.
"disk_io_counters.cache_clear()" can be used to invalidate the
cache.
On recent Windows versions 'diskperf -y' command may need to be
executed first otherwise this function won't find any disk.
"""
kwargs = dict(perdisk=perdisk) if LINUX else {}
rawdict = _psplatform.disk_io_counters(**kwargs)
if not rawdict:
return {} if perdisk else None
if nowrap:
rawdict = _wrap_numbers(rawdict, 'psutil.disk_io_counters')
nt = getattr(_psplatform, "sdiskio", _common.sdiskio)
if perdisk:
for disk, fields in rawdict.items():
rawdict[disk] = nt(*fields)
return rawdict
else:
return nt(*[sum(x) for x in zip(*rawdict.values())])
disk_io_counters.cache_clear = functools.partial(
_wrap_numbers.cache_clear, 'psutil.disk_io_counters')
disk_io_counters.cache_clear.__doc__ = "Clears nowrap argument cache"
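# Usage sketch, not part of psutil: disk_io_counters() returns cumulative
# counters, so a throughput figure requires sampling twice and diffing, as
# below. The 1-second interval is an arbitrary choice for the example.
def _example_disk_io_rate():
    import time
    import psutil
    before = psutil.disk_io_counters()
    time.sleep(1.0)
    after = psutil.disk_io_counters()
    if before is None or after is None:     # no disks found on this system
        return None
    read_bps = after.read_bytes - before.read_bytes     # bytes read per second
    write_bps = after.write_bytes - before.write_bytes  # bytes written per second
    return read_bps, write_bps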
# =====================================================================
# --- network related functions
# =====================================================================
def net_io_counters(pernic=False, nowrap=True):
"""Return network I/O statistics as a namedtuple including
the following fields:
- bytes_sent: number of bytes sent
- bytes_recv: number of bytes received
- packets_sent: number of packets sent
- packets_recv: number of packets received
- errin: total number of errors while receiving
- errout: total number of errors while sending
- dropin: total number of incoming packets which were dropped
- dropout: total number of outgoing packets which were dropped
(always 0 on macOS and BSD)
If *pernic* is True return the same information for every
network interface installed on the system as a dictionary
with network interface names as the keys and the namedtuple
described above as the values.
    If *nowrap* is True it detects and adjusts the numbers which overflow
    and wrap (restart from 0) and adds "old value" to "new value" so that
the returned numbers will always be increasing or remain the same,
    but never decrease.
    "net_io_counters.cache_clear()" can be used to invalidate the
cache.
"""
rawdict = _psplatform.net_io_counters()
if not rawdict:
return {} if pernic else None
if nowrap:
rawdict = _wrap_numbers(rawdict, 'psutil.net_io_counters')
if pernic:
for nic, fields in rawdict.items():
rawdict[nic] = _common.snetio(*fields)
return rawdict
else:
return _common.snetio(*[sum(x) for x in zip(*rawdict.values())])
net_io_counters.cache_clear = functools.partial(
_wrap_numbers.cache_clear, 'psutil.net_io_counters')
net_io_counters.cache_clear.__doc__ = "Clears nowrap argument cache"
def net_connections(kind='inet'):
"""Return system-wide socket connections as a list of
(fd, family, type, laddr, raddr, status, pid) namedtuples.
In case of limited privileges 'fd' and 'pid' may be set to -1
and None respectively.
The *kind* parameter filters for connections that fit the
following criteria:
+------------+----------------------------------------------------+
| Kind Value | Connections using |
+------------+----------------------------------------------------+
| inet | IPv4 and IPv6 |
| inet4 | IPv4 |
| inet6 | IPv6 |
| tcp | TCP |
| tcp4 | TCP over IPv4 |
| tcp6 | TCP over IPv6 |
| udp | UDP |
| udp4 | UDP over IPv4 |
| udp6 | UDP over IPv6 |
| unix | UNIX socket (both UDP and TCP protocols) |
| all | the sum of all the possible families and protocols |
+------------+----------------------------------------------------+
On macOS this function requires root privileges.
"""
return _psplatform.net_connections(kind)
def net_if_addrs():
"""Return the addresses associated to each NIC (network interface
card) installed on the system as a dictionary whose keys are the
NIC names and value is a list of namedtuples for each address
assigned to the NIC. Each namedtuple includes 5 fields:
- family: can be either socket.AF_INET, socket.AF_INET6 or
psutil.AF_LINK, which refers to a MAC address.
- address: is the primary address and it is always set.
    - netmask: the netmask; 'netmask', 'broadcast' and 'ptp' may be None.
- ptp: stands for "point to point" and references the
destination address on a point to point interface
(typically a VPN).
    - broadcast: the broadcast address; 'broadcast' and *ptp* are mutually exclusive.
Note: you can have more than one address of the same family
associated with each interface.
"""
has_enums = sys.version_info >= (3, 4)
if has_enums:
import socket
rawlist = _psplatform.net_if_addrs()
rawlist.sort(key=lambda x: x[1]) # sort by family
ret = collections.defaultdict(list)
for name, fam, addr, mask, broadcast, ptp in rawlist:
if has_enums:
try:
fam = socket.AddressFamily(fam)
except ValueError:
if WINDOWS and fam == -1:
fam = _psplatform.AF_LINK
elif (hasattr(_psplatform, "AF_LINK") and
_psplatform.AF_LINK == fam):
# Linux defines AF_LINK as an alias for AF_PACKET.
# We re-set the family here so that repr(family)
# will show AF_LINK rather than AF_PACKET
fam = _psplatform.AF_LINK
if fam == _psplatform.AF_LINK:
# The underlying C function may return an incomplete MAC
# address in which case we fill it with null bytes, see:
# https://github.com/giampaolo/psutil/issues/786
separator = ":" if POSIX else "-"
while addr.count(separator) < 5:
addr += "%s00" % separator
ret[name].append(_common.snicaddr(fam, addr, mask, broadcast, ptp))
return dict(ret)
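# Illustrative sketch, not part of psutil: walks the dict returned by
# net_if_addrs() above and picks out the AF_LINK entries, i.e. the MAC address
# of each interface.
def _example_net_if_macs():
    import psutil
    macs = {}
    for nic, addrs in psutil.net_if_addrs().items():
        for addr in addrs:
            if addr.family == psutil.AF_LINK:  # MAC address family
                macs[nic] = addr.address
    return macs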
def net_if_stats():
"""Return information about each NIC (network interface card)
installed on the system as a dictionary whose keys are the
NIC names and value is a namedtuple with the following fields:
- isup: whether the interface is up (bool)
- duplex: can be either NIC_DUPLEX_FULL, NIC_DUPLEX_HALF or
NIC_DUPLEX_UNKNOWN
    - speed: the NIC speed expressed in megabits per second (Mbps); if it can't
be determined (e.g. 'localhost') it will be set to 0.
- mtu: the maximum transmission unit expressed in bytes.
"""
return _psplatform.net_if_stats()
# =====================================================================
# --- sensors
# =====================================================================
# Linux, macOS
if hasattr(_psplatform, "sensors_temperatures"):
def sensors_temperatures(fahrenheit=False):
"""Return hardware temperatures. Each entry is a namedtuple
representing a certain hardware sensor (it may be a CPU, an
hard disk or something else, depending on the OS and its
configuration).
All temperatures are expressed in celsius unless *fahrenheit*
is set to True.
"""
def convert(n):
if n is not None:
return (float(n) * 9 / 5) + 32 if fahrenheit else n
ret = collections.defaultdict(list)
rawdict = _psplatform.sensors_temperatures()
for name, values in rawdict.items():
while values:
label, current, high, critical = values.pop(0)
current = convert(current)
high = convert(high)
critical = convert(critical)
if high and not critical:
critical = high
elif critical and not high:
high = critical
ret[name].append(
_common.shwtemp(label, current, high, critical))
return dict(ret)
__all__.append("sensors_temperatures")
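# Minimal sketch, not part of psutil: flattens the per-sensor dict returned by
# sensors_temperatures() above into (name, label, current) tuples; the function
# only exists on Linux and macOS, hence the hasattr() guard.
def _example_sensor_temperatures():
    import psutil
    readings = []
    if hasattr(psutil, "sensors_temperatures"):
        for name, entries in psutil.sensors_temperatures().items():
            for entry in entries:
                readings.append((name, entry.label, entry.current))
    return readings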
# Linux, macOS
if hasattr(_psplatform, "sensors_fans"):
def sensors_fans():
"""Return fans speed. Each entry is a namedtuple
representing a certain hardware sensor.
        All speeds are expressed in RPM (revolutions per minute).
"""
return _psplatform.sensors_fans()
__all__.append("sensors_fans")
# Linux, Windows, FreeBSD, macOS
if hasattr(_psplatform, "sensors_battery"):
def sensors_battery():
"""Return battery information. If no battery is installed
returns None.
- percent: battery power left as a percentage.
- secsleft: a rough approximation of how many seconds are left
before the battery runs out of power. May be
                    POWER_TIME_UNKNOWN or POWER_TIME_UNLIMITED.
- power_plugged: True if the AC power cable is connected.
"""
return _psplatform.sensors_battery()
__all__.append("sensors_battery")
# =====================================================================
# --- other system related functions
# =====================================================================
def boot_time():
"""Return the system boot time expressed in seconds since the epoch."""
# Note: we are not caching this because it is subject to
# system clock updates.
return _psplatform.boot_time()
def users():
"""Return users currently connected on the system as a list of
namedtuples including the following fields.
- user: the name of the user
- terminal: the tty or pseudo-tty associated with the user, if any.
- host: the host name associated with the entry, if any.
- started: the creation time as a floating point number expressed in
seconds since the epoch.
"""
return _psplatform.users()
# =====================================================================
# --- Windows services
# =====================================================================
if WINDOWS:
def win_service_iter():
"""Return a generator yielding a WindowsService instance for all
Windows services installed.
"""
return _psplatform.win_service_iter()
def win_service_get(name):
"""Get a Windows service by *name*.
Raise NoSuchProcess if no service with such name exists.
"""
return _psplatform.win_service_get(name)
# =====================================================================
def test(): # pragma: no cover
from ._common import bytes2human
from ._compat import get_terminal_size
today_day = datetime.date.today()
templ = "%-10s %5s %5s %7s %7s %5s %6s %6s %6s %s"
attrs = ['pid', 'memory_percent', 'name', 'cmdline', 'cpu_times',
'create_time', 'memory_info', 'status', 'nice', 'username']
print(templ % ("USER", "PID", "%MEM", "VSZ", "RSS", "NICE",
"STATUS", "START", "TIME", "CMDLINE"))
for p in process_iter(attrs, ad_value=None):
if p.info['create_time']:
ctime = datetime.datetime.fromtimestamp(p.info['create_time'])
if ctime.date() == today_day:
ctime = ctime.strftime("%H:%M")
else:
ctime = ctime.strftime("%b%d")
else:
ctime = ''
if p.info['cpu_times']:
cputime = time.strftime("%M:%S",
time.localtime(sum(p.info['cpu_times'])))
else:
cputime = ''
user = p.info['username'] or ''
if not user and POSIX:
try:
user = p.uids()[0]
except Error:
pass
if user and WINDOWS and '\\' in user:
user = user.split('\\')[1]
user = user[:9]
vms = bytes2human(p.info['memory_info'].vms) if \
p.info['memory_info'] is not None else ''
rss = bytes2human(p.info['memory_info'].rss) if \
p.info['memory_info'] is not None else ''
memp = round(p.info['memory_percent'], 1) if \
p.info['memory_percent'] is not None else ''
nice = int(p.info['nice']) if p.info['nice'] else ''
if p.info['cmdline']:
cmdline = ' '.join(p.info['cmdline'])
else:
cmdline = p.info['name']
status = p.info['status'][:5] if p.info['status'] else ''
line = templ % (
user[:10],
p.info['pid'],
memp,
vms,
rss,
nice,
status,
ctime,
cputime,
cmdline)
print(line[:get_terminal_size()[0]])
del memoize, memoize_when_activated, division, deprecated_method
if sys.version_info[0] < 3:
del num, x
if __name__ == "__main__":
test()
|
py | b400ec887f748b6eae19abbe5f810685c06e3d8e | from ..common import Node
from ..util import LoggingMixin
from bos_consensus.network import BaseTransport
class BaseBlockchain(LoggingMixin):
node = None
transport = None
def __init__(self, node, transport):
assert isinstance(node, Node)
assert isinstance(transport, BaseTransport)
super(BaseBlockchain, self).__init__()
self.node = node
self.transport = transport
self.set_logging('blockchain', node=self.node.name)
def __repr__(self):
return '<Consensus: %s(%s)>' % (self.node_name, self.endpoint)
@property
def node_name(self):
return self.node.name
@property
def endpoint(self):
return self.node.endpoint
def to_dict(self):
return dict(
node=self.node.to_dict()
)
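# Hypothetical subclass sketch, not taken from the bos_consensus codebase: shows
# how a concrete blockchain might build on BaseBlockchain, reusing the validated
# node and transport passed to __init__. The receive() method name is invented.
class ExampleBlockchain(BaseBlockchain):
    def __init__(self, node, transport):
        super(ExampleBlockchain, self).__init__(node, transport)
        self.messages = []
    def receive(self, message):
        # a real implementation would validate and broadcast here; this sketch
        # only records the message locally
        self.messages.append(message)
        return self.to_dict()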
|
py | b400ecd6f53bb816bb71bdd8431d89e18f70dfcd | from flask import render_template, redirect, url_for, flash, request, jsonify, json
from app import app, db, mail
from app.forms import SignupForm, LoginForm, ForgetForm, PasswordChangeForm, PasswordResetForm, Chatbot, DonateForm
from flask_login import current_user, login_user, logout_user, login_required
from app.models import User
from flask_mail import Message
import pymongo
import random
from datetime import datetime
from threading import Thread
# import boto3
# from pathlib import Path
from pythainlp.tokenize import word_tokenize
from pythainlp.tag import pos_tag
import re
@app.route('/')
def home():
form = Chatbot()
return render_template('home.html', form=form)
"""
Login and user sub-system
"""
@app.route('/signup', methods=('GET', 'POST'))
def signup():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = SignupForm()
if form.validate_on_submit():
# Get data from form
name = form.name.data
email = form.email.data
password = form.password.data
password_check = form.password_check.data
        # Check if email already exists
email_exist = User.query.filter_by(email=email).first()
if email_exist:
comment = f"อีเมล {email} เคยลงทะเบียนไว้แล้ว"
return render_template('signup-error.html', comment=comment)
# Check if passwords match
if password == password_check:
password_final = password
else:
comment = "คุณพิมพ์รหัสผ่านสองช่องไม่ตรงกัน"
return render_template('signup-error.html', comment=comment)
# Create user with name, email, password
new_user = User(name=name, email=email)
new_user.set_password(password_final)
db.session.add(new_user)
db.session.commit()
# Give confirmation, login, and redirect to profile page
user = User.query.filter_by(email=form.email.data).first()
login_user(user)
flash("ลงทะเบียนสำเร็จ และล็อกอินเรียบร้อยแล้ว")
return redirect('/profile')
return render_template('signup.html', form=form)
# Function to send mail using thread
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
@app.route('/forget', methods=('GET', 'POST'))
def forget():
form = ForgetForm()
if form.validate_on_submit():
# Get data from form
email = form.email.data
# Check if entered email is an existing user or not
user = User.query.filter_by(email=email).first()
if user is None:
# Return comment and error type
comment = "ไม่พบอีเมลที่กรอกในระบบสมาชิก"
error_type = "wrong_email"
return render_template('forget-result.html', comment=comment, error_type=error_type)
# If email exists, proceed to password recovery process
else:
# Generate password_reset_id
rand_universe = [1,2,3,4,5,6,7,8,9,"a","b","c","d","e","f","g","A","B","C","D","E","F","G"]
rand_str = ""
rand_list = random.sample(rand_universe, k=12)
password_reset_id = rand_str.join([str(i) for i in rand_list])
# Insert password_reset_id in db for this user
user.password_reset_id = password_reset_id
db.session.commit()
# Send an email to user
"""
!!! MUST CUSTOMISE MESSAGE BODY IN IMPLEMENTATION !!!
"""
msg = Message(subject='[chatbotjaidee.com] รีเซ็ตรหัสผ่าน',
sender = '[email protected]',
recipients = [email]) # <<< CONFIGURE WEBSITE URL
msg.body = ("คุณได้กดขอรหัสผ่านใหม่จากเว็บ chatbotjaidee.com กรุณากดลิงก์นี้ https://chatbotjaidee.com/password-reset/" + password_reset_id + " เพื่อตั้งรหัสผ่านใหม่") # <<< CONFIGURE EMAIL MESSAGE AND URL
Thread(target=send_async_email, args=(app, msg)).start() # Send mail asynchronously
# Return comment
comment = "เราได้ส่งคำแนะนำในการตั้งรหัสผ่านใหม่ไปยังอีเมลของท่านแล้ว"
return render_template('forget-result.html', comment=comment)
return render_template('forget.html', form=form)
# Password recovery API endpoint
@app.route('/password-reset/<string:password_reset_id>')
def password_reset(password_reset_id):
# Check if password_reset_id is valid or not
user = User.query.filter_by(password_reset_id=password_reset_id).first()
if user is None:
flash("ลิงก์รีเซ็ตรหัสผ่านไม่ผ่านการตรวจสอบ หรือได้ใช้ลิงก์นี้ไปแล้ว")
return redirect('/')
# If password_reset_id is valid, proceed to reset password
else:
form = PasswordResetForm()
return render_template('password-reset.html', password_reset_id=password_reset_id, form=form)
@app.route('/password-reset-result', methods=('GET', 'POST'))
def password_reset_result():
form = PasswordResetForm()
if form.validate_on_submit():
# Get data from form
password_reset_id = form.password_reset_id.data
password_new = form.password_new.data
password_new_check = form.password_new_check.data
# Get the user who belong to this password_reset_id
user = User.query.filter_by(password_reset_id=password_reset_id).first()
# Check if new passwords match each other
if password_new != password_new_check:
# Return comment and error type
comment = "คุณพิมพ์รหัสผ่านสองช่องไม่ตรงกัน"
error_type = "unmatched_password_check_reset"
return render_template('password-change-result.html', comment=comment, error_type=error_type, password_reset_id=password_reset_id)
# Proceed if passwords check passed
else:
# Generate new password hash
user.set_password(password_new)
            # Update password_reset_id with blank string so the id can be used this time only
# and can't be used in API
user.password_reset_id = ""
db.session.commit()
# Login user instantly
login_user(user)
flash("ล็อกอินเรียบร้อยแล้ว")
# Return comment
comment = "กรุณาใช้รหัสผ่านใหม่เมื่อล็อกอินครั้งถัดไป"
return render_template('password-change-result.html', comment=comment)
return render_template('password-change-result.html')
@app.route('/login', methods=('GET', 'POST'))
def login():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is None or not user.check_password(form.password.data):
return render_template('fail.html')
login_user(user)
# Update lastlogin_dt to the current time
user.lastlogin_dt = datetime.now()
db.session.commit()
flash("ล็อกอินสำเร็จ")
return redirect('/profile')
return render_template('login.html', form=form)
@app.route('/logout')
@login_required
def logout():
logout_user()
flash("ออกจากระบบเรียบร้อยแล้ว")
return redirect(url_for('home'))
@app.route('/password-change', methods=('GET', 'POST'))
@login_required
def password_change():
form = PasswordChangeForm()
if form.validate_on_submit():
# Get data from form
pass_current = form.password_current.data
pass_new = form.password_new.data
pass_new_check = form.password_new_check.data
# Connect to db
user = User.query.filter_by(id=current_user.id).first()
# Check if current pass matches pass in db
if not user.check_password(pass_current):
# Return comment and error type
comment = "คุณใส่รหัสผ่านปัจจุบันไม่ถูกต้อง"
error_type = "wrong_pass_current"
return render_template('password-change-result.html', comment=comment, error_type=error_type)
# Check if new passwords match each other
elif pass_new != pass_new_check:
# Return comment and error type
comment = "คุณพิมพ์รหัสผ่านสองช่องไม่ตรงกัน"
error_type = "unmatched_password_check"
return render_template('password-change-result.html', comment=comment, error_type=error_type)
# Proceed if 2 above checks passed
else:
# Generate new password hash
user.set_password(pass_new)
db.session.commit()
# Return comment
comment = "กรุณาใช้รหัสผ่านใหม่เมื่อล็อกอินครั้งถัดไป"
return render_template('password-change-result.html', comment=comment)
return render_template('password-change.html', form=form)
"""
Profile
"""
@app.route('/profile')
@login_required
def profile():
user = User.query.filter_by(id=current_user.id).first()
user_id = user.id
user_name = user.name
user_email = user.email
user_role = user.role
user_create_dt = user.create_dt
user_lastlogin_dt = user.lastlogin_dt
return render_template('profile.html', user_id=user_id, user_name=user_name, user_email=user_email, user_role=user_role, user_create_dt=user_create_dt, user_lastlogin_dt=user_lastlogin_dt)
"""
Search API
"""
def gender_convert(gender):
if gender == 'male':
return 'ผู้ชาย'
elif gender == 'female':
return 'ผู้หญิง'
else:
return ' LGBTQ'
# Function to save search_record using thread
def search_record_insert(app, record):
with app.app_context():
# Connect and define the database
client = pymongo.MongoClient(app.config['DB_SOLUTION_URI'])
mongodb = client.jaidee
mongodb.search_record.insert_one(record)
@app.route('/search/api', methods=('GET', 'POST'))
def search_api():
if request.method == 'POST':
# Get request JSON and parse as dict
input_request = request.get_json()
# Remove Zero-width Space to prevent white block display
input_request['request'] = input_request['request'].replace('\u200b', '')
# Process input request using PythaiNLP
word_list_raw = word_tokenize(input_request['request'], engine='newmm', keep_whitespace=False)
word_list_pos = pos_tag(word_list_raw, corpus='pud')
print(f"Words deconstruction: {word_list_pos}")
word_list = []
for word in word_list_pos:
if word[1] == 'NOUN' or word[1] == 'VERB' or word[1] == 'ADJ' or word[1] == 'PROPN':
word_list.append(word[0])
# Prepare regex
word_regex_and = '.*'.join(word_list)
word_regex_or = '|'.join(word_list)
regex_and = re.compile(word_regex_and)
regex_or = re.compile(word_regex_or)
# Connect and define the database
client = pymongo.MongoClient(app.config['DB_SOLUTION_URI'])
mongodb = client.jaidee
"""
'topic' COLUMN is INDEXED. If the db is updated,
may need to DROP and CREATE topic index.
"""
# Find the db using the given regex
data = []
for result in mongodb.solution.find({'topic': {'$regex': regex_and}}):
data.append({
"gender": gender_convert(result['gender']),
"age": result['age'],
"area": result['area'],
"topic": result['topic'],
"solution": result['solution'],
"mode": 1 # 1 means strict mode
})
# Insert search term and result count into MongoDB
record = {
'datetime': datetime.now(),
'search_term': input_request['request'],
'result_count': len(data)
}
Thread(target=search_record_insert, args=(app, record)).start() # Save record asynchronously
# Output the data
if len(data) > 0: # If there's at least one result
# Random shuffle data list
random.shuffle(data)
print(f"Data length: {len(data)}")
print(f"Regex AND: {regex_and}")
return jsonify(data)
else: # If no result, use OR regex
for result in mongodb.solution.find({'topic': {'$regex': regex_or}}):
data.append({
"gender": gender_convert(result['gender']),
"age": result['age'],
"area": result['area'],
"topic": result['topic'],
"solution": result['solution'],
"mode": 2 # 2 means loose mode
})
if len(data) > 0: # If there's result
# Random shuffle data list
random.shuffle(data)
print(f"Data length: {len(data)}")
print(f"Regex OR: {regex_or}")
return jsonify(data)
else: # If there's no result at all
data.append({"result": 0})
print(f"Data length: {len(data)}")
print(f"Regex OR: {regex_or}")
return jsonify(data)
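# Worked sketch, not part of the app: shows what the AND/OR regexes built in
# search_api() above look like for a hypothetical token list. '.*'.join()
# requires every token to appear in order, while '|'.join() matches any token.
def _example_search_regex():
    import re
    word_list = ['sleep', 'stress']               # hypothetical tokens
    regex_and = re.compile('.*'.join(word_list))  # pattern 'sleep.*stress'
    regex_or = re.compile('|'.join(word_list))    # pattern 'sleep|stress'
    assert regex_and.search('sleep problems from stress')
    assert regex_or.search('stress at work')
    return regex_and.pattern, regex_or.pattern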
# Function to insert feedback to MongoDB using thread
def send_feedback(app, data_dict):
with app.app_context():
# Connect and define the database
client = pymongo.MongoClient(app.config['DB_SOLUTION_URI'])
mongodb = client.jaidee
mongodb.donate.insert_one(data_dict)
@app.route('/feedback/api', methods=('GET', 'POST'))
def feedback_api():
if request.method == 'POST':
# Connect and define the database
client = pymongo.MongoClient(app.config['DB_SOLUTION_URI'])
mongodb = client.jaidee
# Get request JSON and parse as dict
input_request = request.get_json()
# Prepare datetime object
now = datetime.now()
# Prepare data dict to record to MongoDB
data_dict = {
"record_date": now,
"score": input_request['score']
}
# Insert the document
mongodb.feedback.insert_one(data_dict)
return jsonify({"status": "posted"})
"""
Static
"""
@app.route('/about')
def about():
return render_template('about.html')
"""
Donate
"""
@app.route('/donate', methods=('GET', 'POST'))
def donate():
form = DonateForm()
if form.validate_on_submit():
# Get data from form
topic = form.topic.data
solution = form.solution.data
age = form.age.data
gender = form.gender.data
province = form.province.data
# Prepare datetime object
now = datetime.now()
# Prepare data dict to record to MongoDB
data_dict = {
"record_date": now,
"topic": topic,
"solution": solution,
"age": age,
"gender": gender,
"province": province
}
Thread(target=send_feedback, args=(app, data_dict)).start() # Send feedback asynchronously
# Issue a cert
# Connect to certificate collection in db
client = pymongo.MongoClient(app.config['DB_SOLUTION_URI'])
cert_db = client.jaidee
# Generate hash as cert_id
rand_universe = [1,2,3,4,5,6,7,8,9,"a","b","c","d","e","f","g","A","B","C","D","E","F","G"]
rand_str = ""
rand_list = random.sample(rand_universe, k=12)
cert_id = rand_str.join([str(i) for i in rand_list])
        # Insert cert_id in cert db (MongoDB)
cert_db.jaidee_cert.insert_one({"hash": cert_id})
return redirect(f'/donated/{cert_id}')
else:
return render_template('donate.html', form=form)
@app.route('/donated/<string:cert_id>')
def donated(cert_id):
return render_template('donated.html', cert_id=cert_id) |
py | b400ece16a267584e55d32f14db126130ab54029 | import torch
import numpy as np
from sketchgraphs_models.graph.dataset import graph_info_from_sequence
from sketchgraphs.data import sketch as data_sketch, sequence as data_sequence
from torch_geometric.data import Data
from sketchgraphs_models.graph import dataset
from sketchgraphs_models.graph.train.data_loading import load_sequences_and_mappings
def load_homogeneous_dataset(dataset_file, auxiliary_file, quantization, seed=None,
force_entity_categorical_features=False):
data = load_sequences_and_mappings(dataset_file, auxiliary_file, quantization, False, False)
if data['entity_feature_mapping'] is None and force_entity_categorical_features:
# Create an entity mapping which only computes the categorical features (i.e. isConstruction and clockwise)
data['entity_feature_mapping'] = dataset.EntityFeatureMapping()
return HomogeneousGraphDataset(
data['sequences'], data['entity_feature_mapping'], data['edge_feature_mapping'], seed=seed), data['weights']
class HomogeneousGraphDataset(torch.utils.data.Dataset):
def __init__(self, sequences, node_feature_mapping=None, edge_feature_mapping=None, seed=None):
self.sequences = sequences
self.rng = np.random.RandomState(seed)
self.edge_feature_mapping = edge_feature_mapping
self.node_feature_mapping = node_feature_mapping
def __len__(self):
return len(self.sequences)
def __getitem__(self, idx):
idx = idx % len(self) # allows using batch size larger than dataset
seq = self.sequences[idx]
# Exclude first step since we always start w/ external node
# Exclude subnode edges since they can be inferred by the subnode op.
step_indices = [i for i, op in enumerate(seq) if i > 0 and not _is_subnode_edge(op)]
step_idx = self.rng.choice(step_indices)
try:
graph = graph_info_from_sequence(seq[:step_idx], self.node_feature_mapping, self.edge_feature_mapping)
except Exception as e:
raise ValueError('Failed to process sequence at index {0}'.format(idx)) from e
target = seq[step_idx]
return Data(x = graph.node_features, edge_index = graph.incidence, edge_attr= graph.edge_features), target
def _is_subnode_edge(op):
return isinstance(op, data_sequence.EdgeOp) and op.label == data_sketch.ConstraintType.Subnode
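# A minimal usage sketch, not part of the original module: the file paths and
# the quantization value below are hypothetical placeholders and must match
# however the sequence/auxiliary files were actually produced.
if __name__ == '__main__':
    ds, weights = load_homogeneous_dataset(
        'data/sequences_train.npy', 'data/sequences_train.stats.pkl',
        quantization=256, seed=0)
    # Each item is a (torch_geometric Data, target op) pair.
    graph, target = ds[0]
    print(type(graph).__name__, type(target).__name__)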
|
py | b400ede8419eaeb993c394b4fbaca25a6ff31ffe | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .layer_libs import ConvBNReLU, ConvBN, SeparableConvBNReLU, DepthwiseConvBN, AuxLayer, SyncBatchNorm
from .activation import Activation
from .pyramid_pool import ASPPModule, PPModule
from .attention import AttentionBlock
from .nonlocal2d import NonLocal2D
|
py | b400eee65e0cf6e2a1c00eac77f762e3f32542ad | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from tests.integration.external_only.external_backend_test_utils import *
from tests.integration.integ_test_utils import *
def testStoppingWithoutExplicitStop(integ_spark_conf):
return_code = launch(integ_spark_conf, "examples/tests/H2OContextWithoutExplicitStop.py")
time.sleep(10)
assert noYarnApps()
assert return_code == 0, "Process ended in a wrong way. It ended with return code " + str(return_code)
def testStoppingWithExplicitStop(integ_spark_conf):
return_code = launch(integ_spark_conf, "examples/tests/H2OContextWithExplicitStop.py")
time.sleep(10)
assert noYarnApps()
assert return_code == 0, "Process ended in a wrong way. It ended with return code " + str(return_code)
|
py | b400ef2a7743102ee8c2685cf0120d45581a00d1 | """locallibrary URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
#Use include() to add paths from the catalog application
from django.urls import include
#Add URL maps to redirect the base URL to our application
from django.views.generic import RedirectView
#Use static() to add url mapping to serve static files during development (only)
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('catalog/', include('catalog.urls')), #Whenever a URL that starts with catalog/ is received, the URLConf module catalog.urls will process the remaining substring
path('', RedirectView.as_view(url='catalog/')),
path('accounts/', include('django.contrib.auth.urls')), #Add Django site authentication urls (for login, logout, password management)
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) |
py | b400ef38d72bdd60254ff7280b59fd02e3db70ee | #!/usr/bin/env python
#
# test_data_generator fs ChRIS plugin app
#
# (c) 2016-2019 Fetal-Neonatal Neuroimaging & Developmental Science Center
# Boston Children's Hospital
#
# http://childrenshospital.org/FNNDSC/
# [email protected]
#
import os
import sys
sys.path.append(os.path.dirname(__file__))
# import the Chris app superclass
from chrisapp.base import ChrisApp
from os import listdir, sep
from os.path import abspath, basename, isdir
from distutils.dir_util import copy_tree
import shutil
import time
import glob
Gstr_title = """
_ _ _ _ _
| | | | | | | | | |
| |_ ___ ___| |_ __| | __ _| |_ __ _ __ _ ___ _ __ ___ _ __ __ _| |_ ___ _ __
| __/ _ \/ __| __| / _` |/ _` | __/ _` | / _` |/ _ \ '_ \ / _ \ '__/ _` | __/ _ \| '__|
| || __/\__ \ |_ | (_| | (_| | || (_| | | (_| | __/ | | | __/ | | (_| | || (_) | |
\__\___||___/\__| \__,_|\__,_|\__\__,_| \__, |\___|_| |_|\___|_| \__,_|\__\___/|_|
______ ______ __/ |
|______| |______|___/
"""
Gstr_synopsis = """
(Edit this in-line help for app specifics. At a minimum, the
flags below are supported -- in the case of DS apps, both
positional arguments <inputDir> and <outputDir>; for FS apps
only <outputDir> -- and similarly for <in> <out> directories
where necessary.)
NAME
test_data_generator.py
SYNOPSIS
python test_data_generator.py \\
[-h] [--help] \\
[--json] \\
[--man] \\
[--meta] \\
[--savejson <DIR>] \\
[-v <level>] [--verbosity <level>] \\
[--version] \\
<inputDir> \\
<outputDir>
BRIEF EXAMPLE
* Copy the (container) internal data to the output directory:
mkdir in out && chmod 777 out
python test_data_generator.py out
DESCRIPTION
`test_data_generator.py` copies internal 'test' data to the
<outputDir>.
ARGS
[-h] [--help]
If specified, show help message and exit.
[--json]
If specified, show json representation of app and exit.
[--man]
If specified, print (this) man page and exit.
[--meta]
If specified, print plugin meta data and exit.
[--savejson <DIR>]
If specified, save json representation file to DIR and exit.
[-v <level>] [--verbosity <level>]
Verbosity level for app. Not used currently.
[--version]
If specified, print version number and exit.
"""
class Test_data_generator(ChrisApp):
"""
Outputs test data for classification module (CNI).
"""
AUTHORS = 'AWC ([email protected])'
SELFPATH = os.path.dirname(os.path.abspath(__file__))
SELFEXEC = os.path.basename(__file__)
EXECSHELL = 'python3'
TITLE = 'Outputs test data for classification module (CNI)'
CATEGORY = ''
TYPE = 'fs'
DESCRIPTION = 'Outputs test data for classification module (CNI)'
DOCUMENTATION = 'http://wiki'
VERSION = '0.1'
ICON = '' # url of an icon image
LICENSE = 'Opensource (MIT)'
MAX_NUMBER_OF_WORKERS = 1 # Override with integer value
MIN_NUMBER_OF_WORKERS = 1 # Override with integer value
MAX_CPU_LIMIT = '' # Override with millicore value as string, e.g. '2000m'
MIN_CPU_LIMIT = '' # Override with millicore value as string, e.g. '2000m'
MAX_MEMORY_LIMIT = '' # Override with string, e.g. '1Gi', '2000Mi'
MIN_MEMORY_LIMIT = '' # Override with string, e.g. '1Gi', '2000Mi'
MIN_GPU_LIMIT = 0 # Override with the minimum number of GPUs, as an integer, for your plugin
MAX_GPU_LIMIT = 0 # Override with the maximum number of GPUs, as an integer, for your plugin
# Use this dictionary structure to provide key-value output descriptive information
# that may be useful for the next downstream plugin. For example:
#
# {
# "finalOutputFile": "final/file.out",
# "viewer": "genericTextViewer",
# }
#
# The above dictionary is saved when plugin is called with a ``--saveoutputmeta``
# flag. Note also that all file paths are relative to the system specified
# output directory.
OUTPUT_META_DICT = {}
def define_parameters(self):
"""
Define the CLI arguments accepted by this plugin app.
Use self.add_argument to specify a new app argument.
"""
def run(self, options):
"""
Define the code to be run by this plugin app.
"""
print(Gstr_title)
print('Version: %s' % self.get_version())
str_srcDir = '../data'
print('\nCopying files from:\n\t%s\n to:\n\t%s.....\n' % (str_srcDir, options.outputdir))
copy_tree(str_srcDir, options.outputdir)
def show_man_page(self):
"""
Print the app's man page.
"""
print(Gstr_synopsis)
# ENTRYPOINT
if __name__ == "__main__":
chris_app = Test_data_generator()
chris_app.launch()
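# Example invocation mirroring the synopsis above (paths are illustrative):
#   mkdir out && chmod 777 out
#   python test_data_generator.py --saveoutputmeta out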
|
py | b400ef75c398c32f9653a922671fe2a7d8818952 | # If you had a sorted list of numbers
# How about if you need to sort a list of numbers
# When would you use, say,
# Do you know what "blocking" is?
# I have two threads running simultaneously
class LinkedNode:
def __init__(self, value, next = None):
self.value = value
self.next = next
def array_to_linked_nodes(initial_list):
def value_to_node(value):
return LinkedNode(value)
nodes = list(map(value_to_node, initial_list))
for n in range(len(nodes) - 1):
nodes[n].next = nodes[n + 1]
return nodes[0]
def print_linked_list(head):
# just for debugging
current = head
while current is not None:
print(current.value)
current = current.next
def reverse_linked_list(head):
previous = None
current = head
while current is not None:
next_node = current.next
current.next = previous
previous = current
current = next_node
return previous
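# An equivalent recursive variant, shown only as an illustrative sketch; it
# performs the same pointer reversal as above but uses O(n) call-stack depth.
def reverse_linked_list_recursive(head, previous=None):
    if head is None:
        return previous
    next_node = head.next
    head.next = previous
    return reverse_linked_list_recursive(next_node, head)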
head = array_to_linked_nodes([1, 2, 3, 4, 5])
print_linked_list(head)
# reversing
reversed_node = reverse_linked_list(head)
print()
print_linked_list(reversed_node) |
py | b400efe6fee4d9bd1b2c796bc37791db2cfab1ce | #!/usr/bin/python
from unicorn import *
from unicorn.mips_const import *
import regress
def hook_intr(uc, intno, _):
print 'interrupt', intno
CODE = 0x400000
asm = '0000a48f'.decode('hex') # lw $a0, ($sp)
class MipsExcept(regress.RegressTest):
def runTest(self):
uc = Uc(UC_ARCH_MIPS, UC_MODE_MIPS32 + UC_MODE_LITTLE_ENDIAN)
uc.hook_add(UC_HOOK_INTR, hook_intr)
uc.mem_map(CODE, 0x1000)
uc.mem_write(CODE, asm)
with self.assertRaises(UcError) as m:
uc.reg_write(UC_MIPS_REG_SP, 0x400001)
uc.emu_start(CODE, CODE + len(asm), 300)
self.assertEqual(UC_ERR_READ_UNALIGNED, m.exception.errno)
with self.assertRaises(UcError) as m:
uc.reg_write(UC_MIPS_REG_SP, 0xFFFFFFF0)
uc.emu_start(CODE, CODE + len(asm), 200)
self.assertEqual(UC_ERR_READ_UNMAPPED, m.exception.errno)
with self.assertRaises(UcError) as m:
uc.reg_write(UC_MIPS_REG_SP, 0x80000000)
uc.emu_start(CODE, CODE + len(asm), 100)
self.assertEqual(UC_ERR_READ_UNMAPPED, m.exception.errno)
if __name__ == '__main__':
regress.main()
|
py | b400f02eef193945e7d8dbfd8c77474346d10f6a | # Copyright Contributors to the Testing Farm project.
# SPDX-License-Identifier: Apache-2.0
import os
import json
import pytest
import gluetool
import gluetool_modules_framework.helpers.ansible
import gluetool_modules_framework.libs.testing_environment
import gluetool_modules_framework.libs.guest as guest_module
import mock
from mock import MagicMock
from . import create_module, check_loadable
@pytest.fixture(name='module')
def fixture_module():
module = create_module(gluetool_modules_framework.helpers.ansible.Ansible)[1]
module._config['ansible-playbook-options'] = []
module._config['ansible-playbook-filepath'] = '/usr/bin/ansible-playbook'
return module
@pytest.fixture(name='local_guest')
def fixture_local_guest(module):
guest = guest_module.NetworkedGuest(module, '127.0.0.1', key='dummy_key')
guest.environment = gluetool_modules_framework.libs.testing_environment.TestingEnvironment(
arch='x86_64',
compose='dummy-compose'
)
return guest
@pytest.fixture(name='assert_output')
def fixture_assert_output():
# https://stackoverflow.com/questions/22627659/run-code-before-and-after-each-test-in-py-test
yield
assert os.path.exists(gluetool_modules_framework.helpers.ansible.ANSIBLE_OUTPUT)
os.unlink(gluetool_modules_framework.helpers.ansible.ANSIBLE_OUTPUT)
def test_sanity(module):
pass
def test_loadable(module):
check_loadable(module.glue, 'gluetool_modules_framework/helpers/ansible.py', 'Ansible')
def test_shared(module):
assert module.glue.has_shared('run_playbook')
def test_run_playbook_json(module, local_guest, monkeypatch, assert_output):
json_output = {'task': 'ok'}
mock_output = MagicMock(exit_code=0, stdout=json.dumps(json_output), stderr='')
mock_command_init = MagicMock(return_value=None)
mock_command_run = MagicMock(return_value=mock_output)
monkeypatch.setattr(gluetool.utils.Command, '__init__', mock_command_init)
monkeypatch.setattr(gluetool.utils.Command, 'run', mock_command_run)
output = module.run_playbook('dummy playbook file', local_guest, json_output=True)
assert output.execution_output is mock_output
assert output.json_output == json_output
mock_command_init.assert_called_once_with([
'/usr/bin/ansible-playbook',
'-i', '127.0.0.1,',
'--private-key', local_guest.key,
os.path.abspath('dummy playbook file')
], logger=local_guest.logger)
env_variables = os.environ.copy()
env_variables.update({'ANSIBLE_STDOUT_CALLBACK': 'json'})
mock_command_run.assert_called_once_with(cwd=None, env=env_variables)
def test_run_playbook_plaintext(module, local_guest, monkeypatch, assert_output):
mock_output = MagicMock(exit_code=0, stdout='', stderr='')
mock_command_init = MagicMock(return_value=None)
mock_command_run = MagicMock(return_value=mock_output)
monkeypatch.setattr(gluetool.utils.Command, '__init__', mock_command_init)
monkeypatch.setattr(gluetool.utils.Command, 'run', mock_command_run)
output = module.run_playbook('dummy playbook file', local_guest)
assert output.execution_output is mock_output
assert output.json_output is None
mock_command_init.assert_called_once_with([
'/usr/bin/ansible-playbook',
'-i', '127.0.0.1,',
'--private-key', local_guest.key,
'-v',
os.path.abspath('dummy playbook file'),
], logger=local_guest.logger)
env_variables = os.environ.copy()
env_variables.update({'ANSIBLE_STDOUT_CALLBACK': 'debug'})
mock_command_run.assert_called_once_with(cwd=None, env=env_variables)
def test_run_playbooks(module, local_guest, monkeypatch, assert_output):
mock_output = MagicMock(exit_code=0, stdout='', stderr='')
mock_command_init = MagicMock(return_value=None)
mock_command_run = MagicMock(return_value=mock_output)
monkeypatch.setattr(gluetool.utils.Command, '__init__', mock_command_init)
monkeypatch.setattr(gluetool.utils.Command, 'run', mock_command_run)
output = module.run_playbook(['playbook1', 'playbook2'], local_guest, json_output=False)
assert output.execution_output is mock_output
assert output.json_output is None
mock_command_init.assert_called_once_with([
'/usr/bin/ansible-playbook',
'-i', '127.0.0.1,',
'--private-key', local_guest.key,
'-v',
os.path.abspath('playbook1'),
os.path.abspath('playbook2')
], logger=local_guest.logger)
env_variables = os.environ.copy()
env_variables.update({'ANSIBLE_STDOUT_CALLBACK': 'debug'})
mock_command_run.assert_called_once_with(cwd=None, env=env_variables)
def test_change_ansible_playbook_filepath_option(module, local_guest, monkeypatch, assert_output):
module._config['ansible-playbook-filepath'] = '/foo/bar/ansible-playbook'
mock_output = MagicMock(exit_code=0, stdout='', stderr='')
mock_command_init = MagicMock(return_value=None)
mock_command_run = MagicMock(return_value=mock_output)
monkeypatch.setattr(gluetool.utils.Command, '__init__', mock_command_init)
monkeypatch.setattr(gluetool.utils.Command, 'run', mock_command_run)
output = module.run_playbook(['playbook1', 'playbook2'], local_guest, json_output=False)
assert output.execution_output is mock_output
assert output.json_output is None
mock_command_init.assert_called_once_with([
'/foo/bar/ansible-playbook',
'-i', '127.0.0.1,',
'--private-key', local_guest.key,
'-v',
os.path.abspath('playbook1'),
os.path.abspath('playbook2')
], logger=local_guest.logger)
env_variables = os.environ.copy()
env_variables.update({'ANSIBLE_STDOUT_CALLBACK': 'debug'})
mock_command_run.assert_called_once_with(cwd=None, env=env_variables)
def test_change_ansible_playbook_filepath_argument(module, local_guest, monkeypatch, assert_output):
mock_output = MagicMock(exit_code=0, stdout='', stderr='')
mock_command_init = MagicMock(return_value=None)
mock_command_run = MagicMock(return_value=mock_output)
monkeypatch.setattr(gluetool.utils.Command, '__init__', mock_command_init)
monkeypatch.setattr(gluetool.utils.Command, 'run', mock_command_run)
output = module.run_playbook(
['playbook1', 'playbook2'],
local_guest, json_output=False,
ansible_playbook_filepath='/foo/bar/ansible-playbook'
)
assert output.execution_output is mock_output
assert output.json_output is None
mock_command_init.assert_called_once_with([
'/foo/bar/ansible-playbook',
'-i', '127.0.0.1,',
'--private-key', local_guest.key,
'-v',
os.path.abspath('playbook1'),
os.path.abspath('playbook2')
], logger=local_guest.logger)
env_variables = os.environ.copy()
env_variables.update({'ANSIBLE_STDOUT_CALLBACK': 'debug'})
mock_command_run.assert_called_once_with(cwd=None, env=env_variables)
def test_error(log, module, local_guest, monkeypatch, assert_output):
# simulate output of failed ansible-playbook run, giving user JSON blob with an error message
mock_error = gluetool.GlueCommandError([], output=MagicMock(stdout='{"msg": "dummy error message"}', stderr=''))
mock_command_run = MagicMock(side_effect=mock_error)
monkeypatch.setattr(gluetool.utils.Command, 'run', mock_command_run)
with pytest.raises(gluetool.GlueError, match='Failure during Ansible playbook execution'):
module.run_playbook('dummy playbook file', local_guest)
def test_error_exit_code(log, module, local_guest, monkeypatch, assert_output):
mock_output = MagicMock(exit_code=1, stdout='{"msg": "dummy error message"}', stderr='')
mock_command_init = MagicMock(return_value=None)
mock_command_run = MagicMock(return_value=mock_output)
monkeypatch.setattr(gluetool.utils.Command, '__init__', mock_command_init)
monkeypatch.setattr(gluetool.utils.Command, 'run', mock_command_run)
with pytest.raises(gluetool.GlueError, match='Failure during Ansible playbook execution'):
module.run_playbook('dummy playbook file', local_guest)
def test_extra_vars(module, local_guest, monkeypatch, assert_output):
mock_output = MagicMock(exit_code=0, stdout=json.dumps({'task': 'ok'}), stderr='')
mock_command_init = MagicMock(return_value=None)
mock_command_run = MagicMock(return_value=mock_output)
monkeypatch.setattr(gluetool.utils.Command, '__init__', mock_command_init)
monkeypatch.setattr(gluetool.utils.Command, 'run', mock_command_run)
module.run_playbook('dummy playbook file', local_guest, variables={
'FOO': 'bar'
}, cwd='foo')
mock_command_init.assert_called_once_with([
'/usr/bin/ansible-playbook',
'-i', '127.0.0.1,',
'--private-key', local_guest.key,
'--extra-vars', 'FOO="bar"',
'-v',
os.path.abspath('dummy playbook file')
], logger=local_guest.logger)
env_variables = os.environ.copy()
env_variables.update({'ANSIBLE_STDOUT_CALLBACK': 'debug'})
mock_command_run.assert_called_once_with(cwd='foo', env=env_variables)
def test_dryrun(module, local_guest, monkeypatch, assert_output):
mock_output = MagicMock(exit_code=0, stdout=json.dumps({'task': 'ok'}), stderr='')
mock_command_init = MagicMock(return_value=None)
mock_command_run = MagicMock(return_value=mock_output)
monkeypatch.setattr(gluetool.utils.Command, '__init__', mock_command_init)
monkeypatch.setattr(gluetool.utils.Command, 'run', mock_command_run)
monkeypatch.setattr(module.glue, '_dryrun_level', gluetool.glue.DryRunLevels.DRY)
module.run_playbook('dummy playbook path', local_guest)
mock_command_init.assert_called_once_with([
'/usr/bin/ansible-playbook',
'-i', '127.0.0.1,',
'--private-key', local_guest.key,
'-C',
'-v',
os.path.abspath('dummy playbook path')
], logger=local_guest.logger)
env_variables = os.environ.copy()
env_variables.update({'ANSIBLE_STDOUT_CALLBACK': 'debug'})
mock_command_run.assert_called_once_with(cwd=None, env=env_variables)
def test_additional_options(module, local_guest, monkeypatch, assert_output):
mock_output = MagicMock(exit_code=0, stdout=json.dumps({'task': 'ok'}), stderr='')
mock_command_init = MagicMock(return_value=None)
mock_command_run = MagicMock(return_value=mock_output)
monkeypatch.setattr(gluetool.utils.Command, '__init__', mock_command_init)
monkeypatch.setattr(gluetool.utils.Command, 'run', mock_command_run)
module._config['ansible-playbook-options'] = ['-vvv', '-d']
module.run_playbook('dummy playbook file', local_guest, variables={
'FOO': 'bar'
})
mock_command_init.assert_called_once_with([
'/usr/bin/ansible-playbook', '-i', '127.0.0.1,', '--private-key', local_guest.key,
'--extra-vars', 'FOO="bar"',
'-vvv',
'-d',
'-v',
os.path.abspath('dummy playbook file')
], logger=local_guest.logger)
|
py | b400f0bf85f4689108f42dfc263f9e4410b445d3 | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from jacket.i18n import _LI
from jacket.compute.scheduler import filters
LOG = logging.getLogger(__name__)
class RetryFilter(filters.BaseHostFilter):
"""Filter out nodes that have already been attempted for scheduling
purposes
"""
def host_passes(self, host_state, spec_obj):
"""Skip nodes that have already been attempted."""
retry = spec_obj.retry
if not retry:
# Re-scheduling is disabled
LOG.debug("Re-scheduling is disabled")
return True
# TODO(sbauza): Once the HostState is actually a ComputeNode, we could
# easily get this one...
host = [host_state.host, host_state.nodename]
# TODO(sbauza)... and we wouldn't need to primitive the hosts into
# lists
hosts = [[cn.host, cn.hypervisor_hostname] for cn in retry.hosts]
passes = host not in hosts
if not passes:
LOG.info(_LI("Host %(host)s fails. Previously tried hosts: "
"%(hosts)s"), {'host': host, 'hosts': hosts})
# Host passes if it's not in the list of previously attempted hosts:
return passes
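# Illustrative walk-through of the membership test above, with hypothetical
# values: if host == ['node1', 'node1-ironic'] and a previous attempt recorded
# ['node1', 'node1-ironic'] in retry.hosts, then `host not in hosts` is False
# and the node is filtered out on the reschedule.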
|
py | b400f19e08cebf556eb5b52cabdd4629f7bff94f |
import time
import signal
import os
from ..common.trex_types import RC_OK, RC_ERR
from ..common.trex_req_resp_client import JsonRpcClient, BatchMessage, ErrNo as JsonRpcErrNo
class RRConnection(object):
'''
Manages a simple RR connection to the server
connection state object
describes the connection to the server state
can be either fully disconnected, fully connected
or marked for disconnection
'''
DISCONNECTED = 1
CONNECTED = 2
MARK_FOR_DISCONNECT = 3
def __init__ (self, ctx):
# hold pointer to context
self.ctx = ctx
self.sigint_on_conn_lost = False
# low level RPC layer
self.rpc = JsonRpcClient(ctx)
# init state
self.state = (self.DISCONNECTED, None)
def probe_server (self):
rpc = JsonRpcClient(self.ctx)
rpc.set_timeout_sec(self.rpc.get_timeout_sec())
try:
rpc.connect()
return rpc.transmit('get_version')
finally:
rpc.disconnect()
def disconnect (self):
'''
disconnect from both channels
sync and async
'''
try:
self.rpc.disconnect()
self.rpc.set_api_h(None)
finally:
self.state = (self.DISCONNECTED, None)
def connect (self):
'''
connect to the server (two channels)
'''
# first disconnect if already connected
if self.is_any_connected():
self.disconnect()
# connect
rc = self.__connect()
if not rc:
self.disconnect()
return rc
def barrier (self):
'''
executes a barrier
when it returns, an async barrier is guaranteed
'''
pass
def sync (self):
'''
fully sync the client with the server
must be called after all the config
was done
'''
pass
def mark_for_disconnect (self, cause):
'''
A multithread safe call
any thread can mark the current connection
as not valid
and will require the main thread to reconnect
'''
pass
def sigint_on_conn_lost_enable (self):
'''
when enabled, if connection
is lost a SIGINT will be sent
to the main thread
'''
self.sigint_on_conn_lost = True
def sigint_on_conn_lost_disable (self):
'''
disable SIGINT dispatching
on case of connection lost
'''
self.sigint_on_conn_lost = False
def is_alive (self):
'''
return True if any data has arrived
from the server in the last 3 seconds
'''
return True
def is_any_connected (self):
return ( self.rpc.is_connected() )
def is_connected (self):
return (self.state[0] == self.CONNECTED and self.rpc.is_connected())
def is_marked_for_disconnect (self):
return self.state[0] == self.MARK_FOR_DISCONNECT
def get_disconnection_cause (self):
return self.state[1]
########## private ################
def __connect (self):
'''
connect to the server (two channels)
'''
# start with the sync channel
self.ctx.logger.pre_cmd("Connecting to RPC server on {0}:{1}".format(self.ctx.server, self.ctx.sync_port))
rc = self.rpc.connect()
if not rc:
return rc
# API sync V2
rc = self.rpc.transmit("api_sync_v2", params = self.ctx.api_ver)
self.ctx.logger.post_cmd(rc)
if not rc:
# api_sync_v2 is not present in v2.30 and older
if rc.errno() == JsonRpcErrNo.MethodNotSupported:
return RC_ERR('Mismatch between client and server versions')
return rc
# get the API_H and provide it to the RPC channel from now on
self.rpc.set_api_h(rc.data()['api_h'])
self.state = (self.CONNECTED, None)
return RC_OK()
|
py | b400f29f8dfca787854e9f744be83aa810654299 | import os
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.uic import loadUiType
from vitables.vtsite import ICONDIR
import vitables.utils
__docformat__ = "restructuredtext"
translate = QtWidgets.QApplication.translate
# This method of the PyQt5.uic module allows for dynamically loading user
# interfaces created by QtDesigner. See the PyQt5 Reference Guide for more
# info.
Ui_SettingsDialog = loadUiType(
os.path.join(os.path.dirname(__file__), "settings_dlg.ui")
)[0]
class Preferences(QtWidgets.QDialog, Ui_SettingsDialog):
"""
Create the Settings dialog.
By loading UI files at runtime we can:
- create user interfaces at runtime (without using pyuic)
- use multiple inheritance, MyParentClass(BaseClass, FormClass)
"""
def __init__(self):
"""
Initialize the preferences dialog.
* initializes the dialog appearance according to current preferences
* connects dialog widgets to slots that provide them functionality
"""
self.vtapp = vitables.utils.getVTApp()
self.vtgui = self.vtapp.gui
# Create the Settings dialog and customize it
super(Preferences, self).__init__(self.vtgui)
self.setupUi(self)
self.config = self.vtapp.config
self.pg_loader = self.vtapp.plugins_mgr
self.all_plugins = dict(
item for item in self.pg_loader.all_plugins.items()
)
self.enabled_plugins = self.pg_loader.enabled_plugins[:]
# Setup the Plugins page
self.setupPluginsPage()
# Setup the page selector widget
self.setupSelector()
# Display the General Settings page
self.stackedPages.setCurrentIndex(0)
# Style names can be retrieved with qt.QStyleFactory.keys()
styles = QtWidgets.QStyleFactory.keys()
self.stylesCB.insertItems(0, styles)
# The dictionary of current ViTables preferences
self.initial_prefs = {}
style_sheet = self.vtgui.logger.styleSheet()
paper = style_sheet[-7:]
self.initial_prefs["Logger/Paper"] = QtGui.QColor(paper)
self.initial_prefs["Logger/Text"] = self.vtgui.logger.textColor()
self.initial_prefs["Logger/Font"] = self.vtgui.logger.font()
self.initial_prefs[
"Workspace/Background"
] = self.vtgui.workspace.background()
self.initial_prefs["Look/currentStyle"] = self.config.current_style
self.initial_prefs[
"Session/startupWorkingDir"
] = self.config.initial_working_directory
self.initial_prefs[
"Session/restoreLastSession"
] = self.config.restore_last_session
# The dictionary used to update the preferences
self.new_prefs = {}
# Apply the current ViTables configuration to the Preferences dialog
self.resetPreferences()
# Connect SIGNALS to SLOTS
self.buttonsBox.helpRequested.connect(
QtWidgets.QWhatsThis.enterWhatsThisMode
)
def setupPluginsPage(self):
"""Populate the tree of plugins.
"""
nrows = len(self.all_plugins)
self.plugins_model = QtGui.QStandardItemModel(nrows, 2, self)
self.pluginsTV.setModel(self.plugins_model)
header = QtWidgets.QHeaderView(QtCore.Qt.Horizontal, self.pluginsTV)
header.setStretchLastSection(True)
self.pluginsTV.setHeader(header)
self.plugins_model.setHorizontalHeaderLabels(["Name", "Comment"])
# Populate the model
row = 0
for UID, desc in self.all_plugins.items():
name = desc["name"]
comment = desc["comment"]
nitem = QtGui.QStandardItem(name)
nitem.setData(UID)
nitem.setCheckable(True)
if UID in self.enabled_plugins:
nitem.setCheckState(QtCore.Qt.Checked)
citem = QtGui.QStandardItem(comment)
self.plugins_model.setItem(row, 0, nitem)
self.plugins_model.setItem(row, 1, citem)
row = row + 1
def setupSelector(self):
"""Setup the page selector widget of the Preferences dialog.
"""
iconsdir = os.path.join(ICONDIR, "64x64")
self.selector_model = QtGui.QStandardItemModel(self)
self.pageSelector.setModel(self.selector_model)
# Populate the model with top level items
alignment = QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter
flags = QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled
general_item = QtGui.QStandardItem()
general_item.setIcon(
QtGui.QIcon(os.path.join(iconsdir, "preferences-other.png"))
)
general_item.setText(
translate(
"Preferences", " General ", "Text for page selector icon"
)
)
general_item.setTextAlignment(alignment)
general_item.setFlags(flags)
style_item = QtGui.QStandardItem()
style_item.setIcon(
QtGui.QIcon(
os.path.join(iconsdir, "preferences-desktop-theme.png")
)
)
style_item.setText(
translate(
"Preferences", "Look & Feel", "Text for page selector icon"
)
)
style_item.setTextAlignment(alignment)
style_item.setFlags(flags)
self.plugins_item = QtGui.QStandardItem()
self.plugins_item.setIcon(
QtGui.QIcon(os.path.join(iconsdir, "preferences-plugin.png"))
)
self.plugins_item.setText(
translate(
"Preferences", " Plugins ", "Text for page selector icon"
)
)
self.plugins_item.setTextAlignment(alignment)
self.plugins_item.setFlags(flags)
for item in (general_item, style_item, self.plugins_item):
self.selector_model.appendRow(item)
# Add items for *loaded* plugins to the Plugins item
index = self.selector_model.indexFromItem(self.plugins_item)
self.pageSelector.setExpanded(index, True)
for UID in self.vtapp.plugins_mgr.loaded_plugins.keys():
name = UID.split("#@#")[0]
item = QtGui.QStandardItem(name)
item.setData(UID)
self.plugins_item.appendRow(item)
@QtCore.pyqtSlot("QModelIndex", name="on_pageSelector_clicked")
def changeSettingsPage(self, index):
"""Slot for changing the selected page in the Settings dialog.
:Parameter index: the index clicked by the user
"""
# If top level item is clicked
if not index.parent().isValid():
self.stackedPages.setCurrentIndex(index.row())
# If a plugin item is clicked
elif index.parent() == self.plugins_item.index():
pluginID = self.selector_model.itemFromIndex(index).data()
self.aboutPluginPage(pluginID)
@QtCore.pyqtSlot("QAbstractButton *", name="on_buttonsBox_clicked")
def executeButtonAction(self, button):
"""Slot that manages button box clicks in the Preferences dialog.
Whenever one of the `Help`, `Reset`, `Cancel` or `OK` buttons is
clicked in the Preferences dialog this slot is called.
:Parameter button: the clicked button.
"""
if button == self.buttonsBox.button(QtWidgets.QDialogButtonBox.Reset):
self.resetPreferences()
elif button == self.buttonsBox.button(QtWidgets.QDialogButtonBox.Help):
pass
elif button == self.buttonsBox.button(
QtWidgets.QDialogButtonBox.Cancel
):
self.reject()
else:
self.applySettings()
def resetPreferences(self):
"""
Apply the current ``ViTables`` configuration to the Preferences dialog.
"""
# Startup page
if self.initial_prefs["Session/startupWorkingDir"] == "last":
self.lastDirCB.setChecked(True)
else:
self.lastDirCB.setChecked(False)
self.restoreCB.setChecked(
self.initial_prefs["Session/restoreLastSession"]
)
# Style page
self.sampleTE.selectAll()
self.sampleTE.setCurrentFont(self.initial_prefs["Logger/Font"])
self.sampleTE.setTextColor(self.initial_prefs["Logger/Text"])
self.sampleTE.moveCursor(QtGui.QTextCursor.End) # Unselect text
self.sampleTE.setStyleSheet(
"background-color: {0}".format(
self.initial_prefs["Logger/Paper"].name()
)
)
self.workspaceLabel.setStyleSheet(
"background-color: {0}".format(
self.initial_prefs["Workspace/Background"].color().name()
)
)
index = self.stylesCB.findText(self.initial_prefs["Look/currentStyle"])
self.stylesCB.setCurrentIndex(index)
# The visual update done above is not enough, we must reset the
# new preferences dictionary and the list of enabled plugins
self.new_prefs.clear()
self.new_prefs.update(self.initial_prefs)
self.enabled_plugins = self.pg_loader.enabled_plugins[:]
self.all_plugins = dict(
item for item in self.pg_loader.all_plugins.items()
)
# UIDs = self.all_plugins.keys()
for row in range(0, self.plugins_model.rowCount()):
item = self.plugins_model.item(row, 0)
if item.data() in self.enabled_plugins:
item.setCheckState(2)
else:
item.setCheckState(0)
def applySettings(self):
"""
Apply the current preferences to the application and close the dialog.
This method is a slot connected to the `accepted` signal. See
ctor for details.
"""
# Update the plugins manager
self.updatePluginsManager()
# Update the rest of settings
for key, value in self.new_prefs.items():
self.new_prefs[key] = value
self.accept()
@QtCore.pyqtSlot("bool", name="on_lastDirCB_toggled")
def setInitialWorkingDirectory(self, cb_on):
"""
Configure startup behavior of the application.
If the `Start in last opened directory` check box is checked
then when the user opens a file *for the very first time* the
current directory of the file selector dialog (CDFSD) will be
the last directory accessed in the previous ``ViTables session``. If
it is not checked then ``ViTables`` follows the standard behavior:
if it has been started from a console session then the CDFSD
will be the current working directory of the session; if it has
been started from a menu/desktop-icon/run-command-applet the
CDFSD will be the user's home.
This is a slot method.
:Parameter cb_on: a boolean indicator of the checkbox state.
"""
if cb_on:
self.new_prefs["Session/startupWorkingDir"] = "last"
else:
self.new_prefs["Session/startupWorkingDir"] = "home"
@QtCore.pyqtSlot("bool", name="on_restoreCB_toggled")
def setRestoreSession(self, cb_on):
"""
Configure startup behavior of the application.
If the `Restore last session` checkbox is checked then, at the
next startup, the application will attempt to restore the last
working session.
This is a slot method.
:Parameter cb_on: a boolean indicator of the checkbox state.
"""
if cb_on:
self.new_prefs["Session/restoreLastSession"] = True
else:
self.new_prefs["Session/restoreLastSession"] = False
@QtCore.pyqtSlot(name="on_workspacePB_clicked")
def setWorkspaceColor(self):
"""Slot for setting the workspace background color."""
stylesheet = self.workspaceLabel.styleSheet()
background = stylesheet[-7:]
color = QtWidgets.QColorDialog.getColor(QtGui.QColor(background))
# The selected color is applied to the sample label besides the button
if color.isValid():
self.new_prefs["Workspace/Background"] = QtGui.QBrush(color)
new_stylesheet = stylesheet.replace(background, color.name())
self.workspaceLabel.setStyleSheet(new_stylesheet)
@QtCore.pyqtSlot("QString", name="on_stylesCB_activated")
def setGlobalStyle(self, style_name):
"""
Slot for setting the application style.
:Parameter style_name: the style to be applied
"""
self.new_prefs["Look/currentStyle"] = style_name
def updatePluginsManager(self):
"""Update the plugins manager before closing the dialog.
When the Apply button is clicked the list of enabled plugins
is refreshed.
"""
self.enabled_plugins = []
for row in range(self.plugins_model.rowCount()):
item = self.plugins_model.item(row, 0)
if item.checkState() == 2:
self.enabled_plugins.append(item.data())
self.pg_loader.enabled_plugins = self.enabled_plugins[:]
def aboutPluginPage(self, pluginID):
"""A page with info about the plugin clicked in the selector widget.
:Parameter pluginID: a unique ID for getting the proper plugin
"""
# Refresh the Preferences dialog pages. There is at most one
# About Plugin page at any given time
while self.stackedPages.count() > 3:
about_page = self.stackedPages.widget(3)
self.stackedPages.removeWidget(about_page)
del about_page
pg_instance = self.vtapp.plugins_mgr.loaded_plugins[pluginID]
try:
about_page = pg_instance.helpAbout(self.stackedPages)
except AttributeError:
about_page = QtWidgets.QWidget(self.stackedPages)
label = QtWidgets.QLabel(
translate(
"Preferences",
"Sorry, there are no info available for this plugin",
"A text label",
),
about_page,
)
layout = QtWidgets.QVBoxLayout(about_page)
layout.addWidget(label)
self.stackedPages.addWidget(about_page)
self.stackedPages.setCurrentIndex(3)
|
py | b400f408b72e0e5d136e14aa83f5885450406cd6 | from django.http import HttpResponse
from django.template import Template
from django.template.context import Context
from .models import Item
def admin_required_view(request):
assert request.user.is_staff
return HttpResponse(Template("You are an admin").render(Context()))
def item_count(request):
return HttpResponse("Item count: %d" % Item.objects.count())
|
py | b400f4af2ad0fab41b7c66494d7e6f3fc39e5729 | # Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Application default credentials.
Implements application default credentials and project ID detection.
"""
import io
import json
import logging
import os
import warnings
import six
from google.auth import environment_vars
from google.auth import exceptions
import google.auth.transport._http_client
_LOGGER = logging.getLogger(__name__)
# Valid types accepted for file-based credentials.
_AUTHORIZED_USER_TYPE = "authorized_user"
_SERVICE_ACCOUNT_TYPE = "service_account"
_VALID_TYPES = (_AUTHORIZED_USER_TYPE, _SERVICE_ACCOUNT_TYPE)
# Help message when no credentials can be found.
_HELP_MESSAGE = """\
Could not automatically determine credentials. Please set {env} or \
explicitly create credentials and re-run the application. For more \
information, please see \
https://cloud.google.com/docs/authentication/getting-started
""".format(
env=environment_vars.CREDENTIALS
).strip()
# Warning when using Cloud SDK user credentials
_CLOUD_SDK_CREDENTIALS_WARNING = """\
Your application has authenticated using end user credentials from Google \
Cloud SDK. We recommend that most server applications use service accounts \
instead. If your application continues to use end user credentials from Cloud \
SDK, you might receive a "quota exceeded" or "API not enabled" error. For \
more information about service accounts, see \
https://cloud.google.com/docs/authentication/"""
def _warn_about_problematic_credentials(credentials):
"""Determines if the credentials are problematic.
Credentials from the Cloud SDK that are associated with Cloud SDK's project
are problematic because they may not have APIs enabled and have limited
quota. If this is the case, warn about it.
"""
from google.auth import _cloud_sdk
if credentials.client_id == _cloud_sdk.CLOUD_SDK_CLIENT_ID:
warnings.warn(_CLOUD_SDK_CREDENTIALS_WARNING)
def _load_credentials_from_file(filename):
"""Loads credentials from a file.
The credentials file must be a service account key or stored authorized
user credentials.
Args:
filename (str): The full path to the credentials file.
Returns:
Tuple[google.auth.credentials.Credentials, Optional[str]]: Loaded
credentials and the project ID. Authorized user credentials do not
have the project ID information.
Raises:
google.auth.exceptions.DefaultCredentialsError: if the file is in the
wrong format or is missing.
"""
if not os.path.exists(filename):
raise exceptions.DefaultCredentialsError(
"File {} was not found.".format(filename)
)
with io.open(filename, "r") as file_obj:
try:
info = json.load(file_obj)
except ValueError as caught_exc:
new_exc = exceptions.DefaultCredentialsError(
"File {} is not a valid json file.".format(filename), caught_exc
)
six.raise_from(new_exc, caught_exc)
# The type key should indicate that the file is either a service account
# credentials file or an authorized user credentials file.
credential_type = info.get("type")
if credential_type == _AUTHORIZED_USER_TYPE:
from google.auth import _cloud_sdk
try:
credentials = _cloud_sdk.load_authorized_user_credentials(info)
except ValueError as caught_exc:
msg = "Failed to load authorized user credentials from {}".format(filename)
new_exc = exceptions.DefaultCredentialsError(msg, caught_exc)
six.raise_from(new_exc, caught_exc)
# Authorized user credentials do not contain the project ID.
_warn_about_problematic_credentials(credentials)
return credentials, None
elif credential_type == _SERVICE_ACCOUNT_TYPE:
from google.oauth2 import service_account
try:
credentials = service_account.Credentials.from_service_account_info(info)
except ValueError as caught_exc:
msg = "Failed to load service account credentials from {}".format(filename)
new_exc = exceptions.DefaultCredentialsError(msg, caught_exc)
six.raise_from(new_exc, caught_exc)
return credentials, info.get("project_id")
else:
raise exceptions.DefaultCredentialsError(
"The file {file} does not have a valid type. "
"Type is {type}, expected one of {valid_types}.".format(
file=filename, type=credential_type, valid_types=_VALID_TYPES
)
)
def _get_gcloud_sdk_credentials():
"""Gets the credentials and project ID from the Cloud SDK."""
from google.auth import _cloud_sdk
# Check if application default credentials exist.
credentials_filename = _cloud_sdk.get_application_default_credentials_path()
if not os.path.isfile(credentials_filename):
return None, None
credentials, project_id = _load_credentials_from_file(credentials_filename)
if not project_id:
project_id = _cloud_sdk.get_project_id()
return credentials, project_id
def _get_explicit_environ_credentials():
"""Gets credentials from the GOOGLE_APPLICATION_CREDENTIALS environment
variable."""
explicit_file = os.environ.get(environment_vars.CREDENTIALS)
if explicit_file is not None:
credentials, project_id = _load_credentials_from_file(
os.environ[environment_vars.CREDENTIALS]
)
return credentials, project_id
else:
return None, None
def _get_gae_credentials():
"""Gets Google App Engine App Identity credentials and project ID."""
# While this library is normally bundled with app_engine, there are
# some cases where it's not available, so we tolerate ImportError.
try:
import google.auth.app_engine as app_engine
except ImportError:
return None, None
try:
credentials = app_engine.Credentials()
project_id = app_engine.get_project_id()
return credentials, project_id
except EnvironmentError:
return None, None
def _get_gce_credentials(request=None):
"""Gets credentials and project ID from the GCE Metadata Service."""
# Ping requires a transport, but we want application default credentials
# to require no arguments. So, we'll use the _http_client transport which
# uses http.client. This is only acceptable because the metadata server
# doesn't do SSL and never requires proxies.
# While this library is normally bundled with compute_engine, there are
# some cases where it's not available, so we tolerate ImportError.
try:
from google.auth import compute_engine
from google.auth.compute_engine import _metadata
except ImportError:
return None, None
if request is None:
request = google.auth.transport._http_client.Request()
if _metadata.ping(request=request):
# Get the project ID.
try:
project_id = _metadata.get_project_id(request=request)
except exceptions.TransportError:
project_id = None
return compute_engine.Credentials(), project_id
else:
return None, None
def default(scopes=None, request=None):
"""Gets the default credentials for the current environment.
`Application Default Credentials`_ provides an easy way to obtain
credentials to call Google APIs for server-to-server or local applications.
This function acquires credentials from the environment in the following
order:
1. If the environment variable ``GOOGLE_APPLICATION_CREDENTIALS`` is set
to the path of a valid service account JSON private key file, then it is
loaded and returned. The project ID returned is the project ID defined
in the service account file if available (some older files do not
contain project ID information).
2. If the `Google Cloud SDK`_ is installed and has application default
credentials set they are loaded and returned.
To enable application default credentials with the Cloud SDK run::
gcloud auth application-default login
If the Cloud SDK has an active project, the project ID is returned. The
active project can be set using::
gcloud config set project
3. If the application is running in the `App Engine standard first
generation environment`_ then the credentials and project ID from the
`App Identity Service`_ are used.
4. If the application is running in `Compute Engine`_, the `App Engine
standard second generation environment`_, or the `App Engine flexible
environment`_ then the credentials and project ID are obtained from the
`Metadata Service`_.
5. If no credentials are found,
:class:`~google.auth.exceptions.DefaultCredentialsError` will be raised.
.. _Application Default Credentials: https://developers.google.com\
/identity/protocols/application-default-credentials
.. _Google Cloud SDK: https://cloud.google.com/sdk
.. _App Engine standard environment: https://cloud.google.com/appengine
.. _App Identity Service: https://cloud.google.com/appengine/docs/python\
/appidentity/
.. _Compute Engine: https://cloud.google.com/compute
.. _App Engine flexible environment: https://cloud.google.com\
/appengine/flexible
.. _Metadata Service: https://cloud.google.com/compute/docs\
/storing-retrieving-metadata
Example::
import google.auth
credentials, project_id = google.auth.default()
Args:
scopes (Sequence[str]): The list of scopes for the credentials. If
specified, the credentials will automatically be scoped if
necessary.
request (google.auth.transport.Request): An object used to make
HTTP requests. This is used to detect whether the application
is running on Compute Engine. If not specified, then it will
use the standard library http client to make requests.
Returns:
Tuple[~google.auth.credentials.Credentials, Optional[str]]:
the current environment's credentials and project ID. Project ID
may be None, which indicates that the Project ID could not be
ascertained from the environment.
Raises:
~google.auth.exceptions.DefaultCredentialsError:
If no credentials were found, or if the credentials found were
invalid.
"""
from google.auth.credentials import with_scopes_if_required
explicit_project_id = os.environ.get(
environment_vars.PROJECT, os.environ.get(environment_vars.LEGACY_PROJECT)
)
checkers = (
_get_explicit_environ_credentials,
_get_gcloud_sdk_credentials,
_get_gae_credentials,
lambda: _get_gce_credentials(request),
)
for checker in checkers:
credentials, project_id = checker()
if credentials is not None:
credentials = with_scopes_if_required(credentials, scopes)
effective_project_id = explicit_project_id or project_id
if not effective_project_id:
_LOGGER.warning(
"No project ID could be determined. Consider running "
"`gcloud config set project` or setting the %s "
"environment variable",
environment_vars.PROJECT,
)
return credentials, effective_project_id
raise exceptions.DefaultCredentialsError(_HELP_MESSAGE)
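if __name__ == "__main__":
    # A minimal, illustrative sketch, not part of the original module: resolve
    # application default credentials with a hypothetical scope and report what
    # was found. It assumes at least one credential source from the chain above
    # is configured in the environment.
    creds, project = default(
        scopes=["https://www.googleapis.com/auth/cloud-platform"])
    print("credentials:", type(creds).__name__, "project:", project)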
|
py | b400f55ad0e7167866aba1d5e8e302eb6f817402 | # -------------------------------------------------------------------------
#
# Part of the CodeChecker project, under the Apache License v2.0 with
# LLVM Exceptions. See LICENSE for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------
"""
Supported analyzer types.
"""
import os
import re
from codechecker_analyzer import env
from codechecker_common.logger import get_logger
from .. import host_check
from .clangtidy.analyzer import ClangTidy
from .clangsa.analyzer import ClangSA
LOG = get_logger('analyzer')
supported_analyzers = {ClangSA.ANALYZER_NAME: ClangSA,
ClangTidy.ANALYZER_NAME: ClangTidy}
def is_ctu_capable(context):
""" Detects if the current clang is CTU compatible. """
enabled_analyzers, _ = \
check_supported_analyzers([ClangSA.ANALYZER_NAME], context)
if not enabled_analyzers:
return False
clangsa_cfg = ClangSA.construct_config_handler([], context)
return clangsa_cfg.ctu_capability.is_ctu_capable
def is_ctu_on_demand_available(context):
""" Detects if the current clang is capable of on-demand AST loading. """
enabled_analyzers, _ = \
check_supported_analyzers([ClangSA.ANALYZER_NAME], context)
if not enabled_analyzers:
return False
clangsa_cfg = ClangSA.construct_config_handler([], context)
return clangsa_cfg.ctu_capability.is_on_demand_ctu_available
def is_statistics_capable(context):
""" Detects if the current clang is Statistics compatible. """
# Resolve potentially missing binaries.
enabled_analyzers, _ = \
check_supported_analyzers([ClangSA.ANALYZER_NAME], context)
if not enabled_analyzers:
return False
clangsa_cfg = ClangSA.construct_config_handler([], context)
check_env = env.extend(context.path_env_extra,
context.ld_lib_path_extra)
checkers = ClangSA.get_analyzer_checkers(clangsa_cfg, check_env)
stat_checkers_pattern = re.compile(r'.+statisticscollector.+')
for checker_name, _ in checkers:
if stat_checkers_pattern.match(checker_name):
return True
return False
def is_z3_capable(context):
""" Detects if the current clang is Z3 compatible. """
enabled_analyzers, _ = \
check_supported_analyzers([ClangSA.ANALYZER_NAME], context)
if not enabled_analyzers:
return False
analyzer_binary = context.analyzer_binaries.get(ClangSA.ANALYZER_NAME)
analyzer_env = env.extend(context.path_env_extra,
context.ld_lib_path_extra)
return host_check.has_analyzer_option(analyzer_binary,
['-Xclang',
'-analyzer-constraints=z3'],
analyzer_env)
def is_z3_refutation_capable(context):
""" Detects if the current clang is Z3 refutation compatible. """
# This function basically checks whether the corresponding analyzer config
# option exists i.e. it is visible on analyzer config option help page.
# However, it doesn't mean that Clang itself is compiled with Z3.
if not is_z3_capable(context):
return False
check_supported_analyzers([ClangSA.ANALYZER_NAME], context)
analyzer_binary = context.analyzer_binaries.get(ClangSA.ANALYZER_NAME)
analyzer_env = env.extend(context.path_env_extra,
context.ld_lib_path_extra)
return host_check.has_analyzer_config_option(analyzer_binary,
'crosscheck-with-z3',
analyzer_env)
def check_supported_analyzers(analyzers, context):
"""
Checks the given analyzers in the current context for their executability
and support in CodeChecker.
This method also updates the given context.analyzer_binaries if the
configured path is bogus but the binary could still be resolved.
:return: (enabled, failed) where enabled is a list of analyzer names
and failed is a list of (analyzer, reason) tuples.
"""
check_env = env.extend(context.path_env_extra,
context.ld_lib_path_extra)
analyzer_binaries = context.analyzer_binaries
enabled_analyzers = set()
failed_analyzers = set()
for analyzer_name in analyzers:
if analyzer_name not in supported_analyzers:
failed_analyzers.add((analyzer_name,
"Analyzer unsupported by CodeChecker."))
continue
# Get the compiler binary to check if it can run.
available_analyzer = True
analyzer_bin = analyzer_binaries.get(analyzer_name)
if not analyzer_bin:
failed_analyzers.add((analyzer_name,
"Failed to detect analyzer binary."))
available_analyzer = False
elif not os.path.isabs(analyzer_bin):
# If the analyzer is not in an absolute path, try to find it...
found_bin = supported_analyzers[analyzer_name].\
resolve_missing_binary(analyzer_bin, check_env)
# found_bin is an absolute path, an executable in one of the
# PATH folders.
# If found_bin is the same as the original binary, i.e., normally
# calling the binary without any search would have resulted in
# the same binary being called, it's NOT a "not found".
if found_bin and os.path.basename(found_bin) != analyzer_bin:
LOG.debug("Configured binary '%s' for analyzer '%s' was "
"not found, but environment PATH contains '%s'.",
analyzer_bin, analyzer_name, found_bin)
context.analyzer_binaries[analyzer_name] = \
os.path.realpath(found_bin)
analyzer_bin = found_bin
if not analyzer_bin or \
not host_check.check_clang(analyzer_bin, check_env):
# Analyzers unavailable under absolute paths are deliberately a
# configuration problem.
failed_analyzers.add((analyzer_name,
"Cannot execute analyzer binary."))
available_analyzer = False
if available_analyzer:
enabled_analyzers.add(analyzer_name)
return enabled_analyzers, failed_analyzers
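# For illustration: a name missing from supported_analyzers ends up in
# failed_analyzers as (name, "Analyzer unsupported by CodeChecker."), while
# names whose binaries resolve and execute are collected in enabled_analyzers.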
def construct_analyzer(buildaction,
analyzer_config):
try:
analyzer_type = buildaction.analyzer_type
LOG.debug_analyzer('Constructing %s analyzer.', analyzer_type)
if analyzer_type in supported_analyzers:
analyzer = supported_analyzers[analyzer_type](analyzer_config,
buildaction)
else:
analyzer = None
LOG.error('Unsupported analyzer type: %s', analyzer_type)
return analyzer
except Exception as ex:
LOG.debug_analyzer(ex)
return None
def build_config_handlers(args, context, enabled_analyzers):
"""
Handle config from command line or from config file if no command line
config is given.
The supported command line config format is JSON. Clang-Tidy also supports
YAML, but no standard library for YAML parsing is available in Python.
"""
analyzer_config_map = {}
for ea in enabled_analyzers:
config_handler = supported_analyzers[ea].\
construct_config_handler(args, context)
analyzer_config_map[ea] = config_handler
return analyzer_config_map
|
py | b400f56fac756408b742a729cd8899073d0bbdb3 | # -*- coding: utf-8 -*-
# 18/8/9
# create by: snower
import datetime
import pytz
import binascii
try:
from bson.objectid import ObjectId
except ImportError:
ObjectId = None
from ..utils import get_timezone
from .filter import Filter
class IntFilter(Filter):
def filter(self, value):
if isinstance(value, int):
return int(value)
if isinstance(value, float):
return int(value)
if value is True:
return 1
if value is None or value is False:
return 0
if isinstance(value, datetime.datetime):
try:
return int(value.timestamp())
except:
return 0
if isinstance(value, datetime.date):
try:
return int(datetime.datetime(value.year, value.month, value.day).timestamp())
except:
return 0
if isinstance(value, datetime.timedelta):
return int(value.total_seconds())
if isinstance(value, (list, tuple, set)):
result = 0
for cv in value:
result += self.filter(cv)
return result
if isinstance(value, dict):
result = 0
for ck, cv in value.items():
result += self.filter(cv)
return result
try:
return int(value)
except:
return 0
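# For illustration: filtering the list [1, 2.5, "3"] sums the element-wise
# conversions to 6, and datetime values are reduced to their Unix timestamps.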
class FloatFilter(Filter):
def filter(self, value):
if isinstance(value, float):
return float(value)
if isinstance(value, int):
return float(value)
if value is True:
return 1.0
if value is None or value is False:
return 0.0
if isinstance(value, datetime.datetime):
try:
return float(value.timestamp())
except:
return 0.0
if isinstance(value, datetime.date):
try:
return float(datetime.datetime(value.year, value.month, value.day).timestamp())
except:
return 0.0
if isinstance(value, datetime.timedelta):
return float(value.total_seconds())
if isinstance(value, (list, tuple, set)):
result = 0.0
for cv in value:
result += self.filter(cv)
return result
if isinstance(value, dict):
result = 0.0
for ck, cv in value.items():
result += self.filter(cv)
return result
try:
return float(value)
except:
return 0.0
class StringFilter(Filter):
def filter(self, value):
if isinstance(value, str):
return value
if value is None:
return ""
if value is True:
return "true"
if value is False:
return "false"
if isinstance(value, datetime.datetime):
try:
return value.strftime(self.args or "%Y-%m-%d %H:%M:%S")
except:
return ""
if isinstance(value, datetime.time):
try:
return value.strftime(self.args or "%H:%M:%S")
except:
return ""
if isinstance(value, datetime.date):
try:
return value.strftime(self.args or "%Y-%m-%d")
except:
return ""
if isinstance(value, int):
try:
return (self.args or "%d") % value
except:
return "0"
if isinstance(value, float):
try:
return (self.args or "%f") % value
except:
return "0.0"
if isinstance(value, bytes):
try:
if self.args == "hex":
                    return binascii.b2a_hex(value).decode("ascii")
return value.decode(self.args or "utf-8")
except:
return ""
if self.args:
try:
return self.args % value
except:
return ""
try:
return str(value)
except:
return ""
class BytesFilter(Filter):
def filter(self, value):
if isinstance(value, bytes):
return value
if value is None:
return b""
if value is True:
return b"true"
if value is False:
return b"false"
if isinstance(value, datetime.datetime):
try:
return bytes(value.strftime(self.args or "%Y-%m-%d %H:%M:%S"), "utf-8")
except:
return b""
if isinstance(value, datetime.time):
try:
return bytes(value.strftime(self.args or "%H:%M:%S"), "utf-8")
except:
return b""
if isinstance(value, datetime.date):
try:
                return bytes(value.strftime(self.args or "%Y-%m-%d"), "utf-8")
            except:
                return b""
if isinstance(value, int):
try:
return bytes((self.args or "%d") % value, "utf-8")
except:
return b"0"
if isinstance(value, float):
try:
return bytes(((self.args or "%f") % value), "utf-8")
except:
return b"0.0"
if isinstance(value, str):
try:
if self.args == "hex":
                    return binascii.a2b_hex(value)
return value.encode(self.args or "utf-8")
except:
return b""
if self.args:
try:
return bytes(self.args % value, "utf-8")
except: pass
try:
return bytes(str(value), "utf-8")
except:
return b""
class BooleanFilter(Filter):
def filter(self, value):
if value is True or value is False:
return value
try:
return bool(value)
except:
return False
def sprintf(self, value):
if value is True:
return "true"
return "true"
class ArrayFilter(Filter):
def filter(self, value):
if isinstance(value, list):
return value
if isinstance(value, (set, tuple)):
return list(value)
if value is None:
return []
return [value]
class MapFilter(Filter):
def filter(self, value):
if isinstance(value, dict):
return value
if isinstance(value, (set, list, tuple)):
if not value:
return {}
            value = list(value)
            if len(value) == 1 and isinstance(value[0], dict):
                return value[0]
value_len = len(value)
try:
return {value[i]: (value[i + 1] if i + 1 < value_len else None) for i in range(0, value_len, 2)}
except:
pass
if value is None:
return {}
try:
return dict(value)
except:
return {}
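
# Illustrative coercion examples (not part of the original module; they assume
# the Filter base class can be constructed without arguments):
#
#   IntFilter().filter("42")             -> 42
#   IntFilter().filter([1, 2, 3])        -> 6   (elements filtered and summed)
#   MapFilter().filter(["k1", 1, "k2"])  -> {"k1": 1, "k2": None}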
class ObjectIdFilter(Filter):
def __init__(self, *args, **kwargs):
if ObjectId is None:
raise ImportError(u"bson required")
super(ObjectIdFilter, self).__init__(*args, **kwargs)
def filter(self, value):
if isinstance(value, ObjectId):
return value
if value is None:
return ObjectId("000000000000000000000000")
if value is True:
return ObjectId("ffffffffffffffffffffffff")
if value is False:
return ObjectId("000000000000000000000000")
if isinstance(value, (list, tuple, set)):
results = []
for cv in value:
results.append(self.filter(cv))
return results
if isinstance(value, dict):
results = {}
for ck, cv in value.items():
results[ck] = self.filter(cv)
            return results
if isinstance(value, (int, float)):
return ObjectId.from_datetime(datetime.datetime.fromtimestamp(value, pytz.timezone(self.args) if self.args else pytz.UTC))
if isinstance(value, datetime.datetime):
return ObjectId.from_datetime(value)
try:
return ObjectId(value)
except:
try:
                return ObjectId.from_datetime(datetime.datetime.strptime(value, self.args or "%Y-%m-%d %H:%M:%S").astimezone(tz=get_timezone()))
except:
return ObjectId("000000000000000000000000")
class DateTimeFilter(Filter):
def __init__(self, *args, **kwargs):
super(DateTimeFilter, self).__init__(*args, **kwargs)
if self.args and self.args[-1] == ")":
try:
index = self.args.rindex("(")
self.dtformat = self.args[:index]
self.tzname = self.args[index + 1: -1]
except:
self.dtformat = self.args
self.tzname = None
else:
self.dtformat = self.args
self.tzname = None
def filter(self, value):
localzone = get_timezone()
if isinstance(value, datetime.datetime):
if localzone != value.tzinfo:
value = value.astimezone(tz=localzone)
if self.dtformat:
return datetime.datetime.strptime(value.strftime(self.dtformat), self.dtformat).astimezone(tz=localzone)
return value
if isinstance(value, datetime.timedelta):
value = datetime.datetime.now(tz=localzone) + value
if self.dtformat:
return datetime.datetime.strptime(value.strftime(self.dtformat), self.dtformat).astimezone(tz=localzone)
return value
if isinstance(value, (int, float)):
value = datetime.datetime.fromtimestamp(value, pytz.timezone(self.tzname) if self.tzname else pytz.UTC)
if localzone != value.tzinfo:
value = value.astimezone(tz=localzone)
if self.dtformat:
return datetime.datetime.strptime(value.strftime(self.dtformat), self.dtformat).astimezone(tz=localzone)
return value
if isinstance(value, (list, tuple, set)):
results = []
for cv in value:
results.append(self.filter(cv))
return results
if isinstance(value, dict):
results = {}
for ck, cv in value.items():
results[ck] = self.filter(cv)
            return results
if ObjectId and isinstance(value, ObjectId):
return value.generation_time
if isinstance(value, datetime.date):
value = datetime.datetime(value.year, value.month, value.day, tzinfo=pytz.timezone(self.tzname) if self.tzname else localzone)
if localzone != value.tzinfo:
value = value.astimezone(tz=localzone)
if self.dtformat:
return datetime.datetime.strptime(value.strftime(self.dtformat), self.dtformat).astimezone(tz=localzone)
return value
try:
return datetime.datetime.strptime(value, self.dtformat or "%Y-%m-%d %H:%M:%S").astimezone(tz=localzone)
except:
return None
def sprintf(self, value):
if isinstance(value, datetime.date):
if isinstance(value, datetime.datetime):
return value.strftime(self.args or "%Y-%m-%d %H:%M:%S")
return value.strftime(self.args or "%Y-%m-%d")
if isinstance(value, datetime.time):
return value.strftime(self.args or "%H:%M:%S")
return str(value)
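
# Illustrative example of the "(timezone)" suffix parsed in __init__ above (not
# part of the original module; it assumes the Filter base class stores the
# format string passed at construction on self.args):
#
#   f = DateTimeFilter("%Y-%m-%d %H:%M:%S(UTC)")
#   f.filter(1600000000)   # interprets the timestamp in UTC, then returns a
#                          # datetime converted to the local get_timezone() zone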
class DateFilter(Filter):
def filter(self, value):
if isinstance(value, datetime.date):
if isinstance(value, datetime.datetime):
localzone = get_timezone()
if localzone != value.tzinfo:
value = value.astimezone(tz=localzone)
return datetime.date(value.year, value.month, value.day)
return value
if isinstance(value, datetime.timedelta):
localzone = get_timezone()
dt = datetime.datetime.now(tz=localzone)
return datetime.date(dt.year, dt.month, dt.day) + value
if isinstance(value, (int, float)):
dt = datetime.datetime.fromtimestamp(value, pytz.timezone(self.args) if self.args else pytz.UTC).astimezone(tz=get_timezone())
return datetime.date(dt.year, dt.month, dt.day)
if isinstance(value, (list, tuple, set)):
results = []
for cv in value:
results.append(self.filter(cv))
return results
if isinstance(value, dict):
results = {}
for ck, cv in value.items():
results[ck] = self.filter(cv)
            return results
try:
dt = datetime.datetime.strptime(value, self.args or "%Y-%m-%d").astimezone(tz=get_timezone())
return datetime.date(dt.year, dt.month, dt.day)
except:
return None
def sprintf(self, value):
if isinstance(value, datetime.date):
if isinstance(value, datetime.datetime):
return value.strftime(self.args or "%Y-%m-%d %H:%M:%S")
return value.strftime(self.args or "%Y-%m-%d")
if isinstance(value, datetime.time):
return value.strftime(self.args or "%H:%M:%S")
return str(value)
class TimeFilter(Filter):
def filter(self, value):
if isinstance(value, datetime.time):
return value
if isinstance(value, datetime.date):
if isinstance(value, datetime.datetime):
localzone = get_timezone()
if localzone != value.tzinfo:
value = value.astimezone(tz=localzone)
return datetime.time(value.hour, value.minute, value.second)
return datetime.time(0, 0, 0)
if isinstance(value, datetime.timedelta):
localzone = get_timezone()
dt = datetime.datetime.now(tz=localzone) + value
return datetime.time(dt.hour, dt.minute, dt.second)
if isinstance(value, (int, float)):
dt = datetime.datetime.fromtimestamp(value, pytz.timezone(self.args) if self.args else pytz.UTC).astimezone(tz=get_timezone())
return datetime.time(dt.hour, dt.minute, dt.second)
if isinstance(value, (list, tuple, set)):
results = []
for cv in value:
results.append(self.filter(cv))
return results
if isinstance(value, dict):
results = {}
for ck, cv in value.items():
results[ck] = self.filter(cv)
            return results
try:
dt = datetime.datetime.strptime("2000-01-01 " + value, "%Y-%m-%d " + (self.args or "%H:%M:%S")).astimezone(tz=get_timezone())
return datetime.time(dt.hour, dt.minute, dt.second)
except:
return None
def sprintf(self, value):
if isinstance(value, datetime.date):
if isinstance(value, datetime.datetime):
return value.strftime(self.args or "%Y-%m-%d %H:%M:%S")
return value.strftime(self.args or "%Y-%m-%d")
if isinstance(value, datetime.time):
return value.strftime(self.args or "%H:%M:%S")
return str(value) |
py | b400f5f4d58c06cd81574624c449a2e2f665d0a8 | import click
import pandas as pd
import os
import time
from binance.client import Client
@click.command()
@click.option('--pm', default=0.01, help='Profit Margin')
@click.option('--ci', default=60, help='Check interval in seconds')
def trade_coins(pm, ci):
if os.path.isdir('crypto-data'):
pass
else:
os.mkdir('crypto-data')
    if os.path.exists('crypto-data/crypto-log.txt'):
        os.remove('crypto-data/crypto-log.txt')
else:
pass
file = open('crypto-data/crypto-log.txt', 'w')
api_key = 'XzkhEsU0HHARj880CT9Ck4tlsD4WZaQSbsbdAy7kXvMp3yOjzKdRbCZdGAeQ2YhA'
api_secret = 'xfg149pnCFuhWHOtgHOmgwCox4DkGRv66FI0kN5VNY3FLuhMhsfhJxJRMIGPeGiQ'
client = Client(api_key, api_secret)
print('--- Client Instantiated ---')
file.write('--- Client Instantiated ---\n')
pd.options.mode.chained_assignment = None
coins = client.get_all_coins_info()
print('--- Coins retrieved ---')
file.write('--- Coins retrieved ---\n')
coins_dataframe = pd.DataFrame(columns=['coin','trading','isLegalMoney'])
for coin in coins:
coins_dataframe = coins_dataframe.append({'coin': coin['coin'], 'trading': coin['trading'],'isLegalMoney': coin['isLegalMoney']}, ignore_index=True)
coins_dataframe = coins_dataframe[coins_dataframe.trading > 0 ]
coins_dataframe = coins_dataframe[coins_dataframe.isLegalMoney == 0]
print('--- Retrieving Trade Fees ---')
file.write('--- Retrieving Trade Fees ---\n')
coins_dataframe['trade_fee'] = coins_dataframe.coin.apply(lambda x: client.get_trade_fee(symbol=x+'USDT'))
coins_dataframe.trade_fee = coins_dataframe.trade_fee.apply(lambda x: x if len(x)> 0 else None)
coins_dataframe = coins_dataframe[coins_dataframe.trade_fee.astype(str) != 'None']
coins_dataframe['trade_symbol'] = coins_dataframe.trade_fee.apply(lambda x: x[0]['symbol'])
print('--- Trade fees retrieved ---')
file.write('--- Trade fees retrieved ---\n')
coins_dataframe.reset_index(inplace=True,drop=True)
coins_dataframe['profit'] = 0
coins_dataframe['gained'] = 0
coins_dataframe['times_it_sold'] = 0
coins_dataframe['coin_status'] = 'initialized'
print('--- Statistics initialized ---')
file.write('--- Statistics initialized ---\n')
initial_buying_prices = os.path.exists('crypto-data/initial_buying_prices.csv')
coins_dataframe['initial_buy_price'] = None
coins_dataframe['initial_buy_cost'] = None
coins_dataframe.reset_index(drop=True,inplace=True)
if initial_buying_prices:
initial_buying_prices = pd.read_csv('crypto-data/initial_buying_prices.csv')
for coin in initial_buying_prices.coin.values:
if coin in coins_dataframe.coin.values:
index = coins_dataframe[coins_dataframe.coin == coin].index[0]
index_initial_buying_prices = initial_buying_prices[initial_buying_prices.coin == coin].index[0]
coins_dataframe.loc[index, 'initial_buy_price'] = initial_buying_prices.loc[index_initial_buying_prices,'initial_buy_price']
coins_dataframe.loc[index, 'initial_buy_cost'] = initial_buying_prices.loc[index_initial_buying_prices,'initial_buy_cost']
else:
prices = client.get_all_tickers()
for price in prices:
if price['symbol'] in coins_dataframe.trade_symbol.values:
index = coins_dataframe[coins_dataframe.trade_symbol == price['symbol']].index[0]
coins_dataframe.loc[index,'initial_buy_price'] = float(price['price'])
coins_dataframe.loc[index,'initial_buy_cost'] = float(coins_dataframe.loc[index,'trade_fee'][0]['makerCommission']) * float(price['price'])
coins_dataframe[['coin','initial_buy_price','initial_buy_cost']].to_csv('crypto-data/initial_buying_prices.csv',index=False)
print('--- Initial prices retrieved ---')
file.write('--- Initial prices retrieved ---\n')
print('--- Starting the updating of the prices loop ---')
file.write('--- Starting the updating of the prices loop ---\n')
coins_sold_history = os.path.exists('crypto-data/coins_sold_history.csv')
if coins_sold_history:
df_coins_sold = pd.read_csv('crypto-data/coins_sold_history.csv')
else:
df_coins_sold = pd.DataFrame(columns=['coin','initial_buy_price','initial_buy_cost','out_price','estimated_cost', 'profit'])
coins_rebought_history = os.path.exists('crypto-data/coins_rebought_history.csv')
if coins_rebought_history:
df_coins_rebought = pd.read_csv('crypto-data/coins_rebought_history.csv')
else:
df_coins_rebought = pd.DataFrame(columns=['coin','initial_buy_price','initial_buy_cost','rebought_at', 'rebuy_cost', 'gained'])
start = time.time()
count = float(0)
while 1:
try:
if int(((time.time() - start))) >= ci:
prices = client.get_all_tickers()
for price in prices:
if price['symbol'] in coins_dataframe.trade_symbol.values:
index = coins_dataframe[coins_dataframe.trade_symbol == price['symbol']].index[0]
coins_dataframe.loc[index,'updated_price'] = float(price['price'])
coins_dataframe.loc[index,'out_price'] = float(coins_dataframe.loc[index,'trade_fee'][0]['takerCommission']) * float(price['price'])
if coins_dataframe.loc[index,'coin_status'] == 'initialized':
coins_dataframe.loc[index,'estimated_cost'] = ((float(coins_dataframe.loc[index,'initial_buy_cost']) + float(coins_dataframe.loc[index,'out_price']) + float(coins_dataframe.loc[index,'initial_buy_price'])))
if coins_dataframe.loc[index,'coin_status'] == 'rebought':
coins_dataframe.loc[index,'estimated_cost'] = (float(coins_dataframe.loc[index,'rebuy_cost']) + float(coins_dataframe.loc[index,'out_price']) + float(coins_dataframe.loc[index,'rebought_at']))
if coins_dataframe.loc[index,'coin_status'] != 'sold':
coins_dataframe.loc[index,'profit'] = coins_dataframe.loc[index,'updated_price'] - float(coins_dataframe.loc[index,'estimated_cost'])
if coins_dataframe.loc[index,'profit'] >= pm:
coins_dataframe.loc[index,'coin_status'] = 'sold'
coins_dataframe.loc[index,'times_it_sold'] += float(1)
df_coins_sold = df_coins_sold.append({'coin': price['symbol'], 'initial_buy_price': coins_dataframe.loc[index,'initial_buy_price'],
'initial_buy_cost': coins_dataframe.loc[index,'initial_buy_cost'], 'out_price': price['price'], 'estimated_cost': coins_dataframe.loc[index,'estimated_cost'],
'profit': coins_dataframe.loc[index,'profit']}, ignore_index=True)
df_coins_sold.to_csv('crypto-data/coins_sold_history.csv', index=False)
print('--- SOLD {} ---'.format(price['symbol']))
file.write('--- SOLD {} ---\n'.format(price['symbol']))
prices = client.get_all_tickers()
for price in prices:
if price['symbol'] in coins_dataframe.trade_symbol.values:
index = coins_dataframe[coins_dataframe.trade_symbol == price['symbol']].index[0]
if coins_dataframe.loc[index,'coin_status'] == 'sold':
if float(price['price']) <= float(coins_dataframe.loc[index,'initial_buy_price']):
coins_dataframe.loc[index,'gained'] += float(coins_dataframe.loc[index,'profit'])
coins_dataframe.loc[index,'coin_status'] = 'rebought'
coins_dataframe.loc[index,'rebought_at'] = (float(price['price']))
coins_dataframe.loc[index,'rebuy_cost'] = float(coins_dataframe.loc[index,'trade_fee'][0]['makerCommission']) * (float(price['price']))
df_coins_rebought = df_coins_rebought.append({'coin': price['symbol'], 'initial_buy_price': coins_dataframe.loc[index, 'initial_buy_price'],
'initial_buy_cost': coins_dataframe.loc[index, 'initial_buy_cost'],
'total_initial_buy_cost': float(coins_dataframe.loc[index, 'initial_buy_price']) + float(coins_dataframe.loc[index, 'initial_buy_cost']),
'rebought_at': coins_dataframe.loc[index,'rebought_at'],
'rebuy_cost':coins_dataframe.loc[index,'rebuy_cost'],
'total_rebuy_cost': float(coins_dataframe.loc[index,'rebought_at']) + float(coins_dataframe.loc[index,'rebuy_cost']),
'gained': coins_dataframe.loc[index,'gained']}, ignore_index=True)
df_coins_rebought.to_csv('crypto-data/coins_rebought_history.csv', index=False)
print('--- REBOUGHT {} ---'.format(price['symbol']))
file.write('--- REBOUGHT {} ---\n'.format(price['symbol']))
start = time.time()
count += float(ci/60)
coins_dataframe.to_csv('crypto-data/export_coins.csv', index=False)
print('--- DataFrame export updated ( Count {} )---'.format(count))
file.write('--- DataFrame export updated ( Count {} )---\n'.format(count))
file.close()
file = open('crypto-data/crypto-log.txt', 'w')
except Exception as e:
print('--- Exception received ---')
print('{}'.format(e))
print('--- Restarting updating the prices ---')
file.write('--- Exception received ---\n')
file.write('{}\n'.format(e))
file.write('--- Restarting updating the prices ---\n')
if __name__ == '__main__':
trade_coins()
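
# Illustrative invocation (the module filename is an assumption):
#   python trade_coins.py --pm 0.02 --ci 120
# runs the loop with a 0.02 profit margin and a 120-second check interval;
# both options fall back to the defaults declared above (0.01 and 60) when omitted.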
|
py | b400f63896dd8c5d4fa0e184c07e585edac2c25c | """
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from workspace_tools.paths import *
from workspace_tools.data.support import *
TEST_CMSIS_LIB = join(TEST_DIR, "cmsis", "lib")
TEST_MBED_LIB = join(TEST_DIR, "mbed", "env")
PERIPHERALS = join(TEST_DIR, "peripherals")
BENCHMARKS_DIR = join(TEST_DIR, "benchmarks")
SD = join(TEST_DIR, "sd")
TMP102 = join(PERIPHERALS, 'TMP102')
"""
Wiring:
* Ground:
* LPC1*: p1
* KL25Z: GND
* Vout
* LPC1*: p40
* KL25Z: P3V3
* TMP102 (I2C):
* LPC1*: (SDA=p28 , SCL=p27)
* KL25Z: (SDA=PTC9, SCL=PTC8)
* MAXWSNENV: (SDA=TP6, SCL=TP5)
* digital_loop (Digital(In|Out|InOut), InterruptIn):
* Arduino headers: (D0 <-> D7)
* LPC1549: (D2 <-> D7)
* LPC1*: (p5 <-> p25 )
* KL25Z: (PTA5<-> PTC6)
* NUCLEO_F103RB: (PC_6 <-> PB_8)
* MAXWSNENV: (TP3 <-> TP4)
* MAX32600MBED: (P1_0 <-> P4_7)
* port_loop (Port(In|Out|InOut)):
* Arduino headers: (D0 <-> D7), (D1 <-> D6)
* LPC1*: (p5 <-> p25), (p6 <-> p26)
* KL25Z: (PTA5 <-> PTC6), (PTA4 <-> PTC5)
* NUCLEO_F103RB: (PC_6 <-> PB_8), (PC_5 <-> PB_9)
* MAXWSNENV: (TP1 <-> TP3), (TP2 <-> TP4)
* MAX32600MBED: (P1_0 <-> P4_7), (P1_1 <-> P4_6)
* analog_loop (AnalogIn, AnalogOut):
* Arduino headers: (A0 <-> A5)
* LPC1549: (A0 <-> D12)
* LPC1*: (p17 <-> p18 )
* KL25Z: (PTE30 <-> PTC2)
* analog_pot (AnalogIn):
* Arduino headers: (A0, A1)
* SD (SPI):
* LPC1*: (mosi=p11 , miso=p12 , sclk=p13 , cs=p14 )
* KL25Z: (mosi=PTD2, miso=PTD3, sclk=PTD1, cs=PTD0)
* MMA7660 (I2C):
* LPC1*: (SDA=p28 , SCL=p27)
* i2c_loop:
* LPC1768: (p28 <-> p9), (p27 <-> p10)
* i2c_eeprom:
* LPC1*: (SDA=p28 , SCL=p27)
* KL25Z: (SDA=PTE0, SCL=PTE1)
* can_transceiver:
* LPC1768: (RX=p9, TX=p10)
* LPC1549: (RX=D9, TX=D8)
* LPC4088: (RX=p9, TX=p10)
"""
TESTS = [
# Automated MBED tests
{
"id": "MBED_A1", "description": "Basic",
"source_dir": join(TEST_DIR, "mbed", "basic"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
},
{
"id": "MBED_A2", "description": "Semihost file system",
"source_dir": join(TEST_DIR, "mbed", "file"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"mcu": ["LPC1768", "LPC2368", "LPC11U24"]
},
{
"id": "MBED_A3", "description": "C++ STL",
"source_dir": join(TEST_DIR, "mbed", "stl"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": False,
},
{
"id": "MBED_A4", "description": "I2C TMP102",
"source_dir": join(TEST_DIR, "mbed", "i2c_TMP102"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, TMP102],
"automated": True,
"peripherals": ["TMP102"]
},
{
"id": "MBED_A5", "description": "DigitalIn DigitalOut",
"source_dir": join(TEST_DIR, "mbed", "digitalin_digitalout"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"peripherals": ["digital_loop"]
},
{
"id": "MBED_A6", "description": "DigitalInOut",
"source_dir": join(TEST_DIR, "mbed", "digitalinout"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"peripherals": ["digital_loop"]
},
{
"id": "MBED_A7", "description": "InterruptIn",
"source_dir": join(TEST_DIR, "mbed", "interruptin"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"duration": 15,
"automated": True,
"peripherals": ["digital_loop"]
},
{
"id": "MBED_A8", "description": "Analog",
"source_dir": join(TEST_DIR, "mbed", "analog"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"peripherals": ["analog_loop"],
"mcu": ["LPC1768", "LPC2368", "LPC2460", "KL25Z", "K64F", "K22F", "LPC4088", "LPC1549",
"NUCLEO_F072RB", "NUCLEO_F091RC", "NUCLEO_F302R8", "NUCLEO_F303K8", "NUCLEO_F303RE",
"NUCLEO_F334R8", "NUCLEO_L053R8", "NUCLEO_L073RZ", "NUCLEO_L152RE",
"NUCLEO_F410RB", "NUCLEO_F411RE", "NUCLEO_F446RE", "DISCO_F407VG", "DISCO_F746NG",
"ARCH_MAX", "MAX32600MBED", "MOTE_L152RC", "B96B_F446VE"]
},
{
"id": "MBED_A9", "description": "Serial Echo at 115200",
"source_dir": join(TEST_DIR, "mbed", "echo"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
#"host_test": "echo"
},
{
"id": "MBED_A10", "description": "PortOut PortIn",
"source_dir": join(TEST_DIR, "mbed", "portout_portin"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"peripherals": ["port_loop"],
"supported": DEFAULT_SUPPORT,
"automated": True,
},
{
"id": "MBED_A11", "description": "PortInOut",
"source_dir": join(TEST_DIR, "mbed", "portinout"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"peripherals": ["port_loop"],
"supported": DEFAULT_SUPPORT,
"automated": True,
},
{
"id": "MBED_A12", "description": "SD File System",
"source_dir": join(TEST_DIR, "mbed", "sd"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, FS_LIBRARY],
"automated": True,
"duration": 15,
"peripherals": ["SD"]
},
{
"id": "MBED_A13", "description": "I2C MMA7660 accelerometer",
"source_dir": join(TEST_DIR, "mbed", "i2c_MMA7660"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, join(PERIPHERALS, 'MMA7660')],
"automated": True,
"peripherals": ["MMA7660"]
},
{
"id": "MBED_A14", "description": "I2C Master",
"source_dir": join(TEST_DIR, "mbed", "i2c_master"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB,],
},
{
"id": "MBED_A15", "description": "I2C Slave",
"source_dir": join(TEST_DIR, "mbed", "i2c_slave"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB,],
},
{
"id": "MBED_A16", "description": "SPI Master",
"source_dir": join(TEST_DIR, "mbed", "spi_master"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB,],
},
{
"id": "MBED_A17", "description": "SPI Slave",
"source_dir": join(TEST_DIR, "mbed", "spi_slave"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB,],
},
{
"id": "MBED_A18", "description": "Interrupt vector relocation",
"source_dir": join(TEST_DIR, "mbed", "vtor_reloc"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB,],
"mcu": ["LPC1768"],
"automated": True,
},
{
"id": "MBED_A19", "description": "I2C EEPROM read/write test",
"source_dir": join(TEST_DIR, "mbed", "i2c_eeprom"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"peripherals": ["24LC256"],
"automated": True,
"duration": 15,
},
{
"id": "MBED_A20", "description": "I2C master/slave test",
"source_dir": join(TEST_DIR, "mbed", "i2c_master_slave"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB,],
"mcu": ["LPC1768", "RZ_A1H"],
"peripherals": ["i2c_loop"]
},
{
"id": "MBED_A21", "description": "Call function before main (mbed_main)",
"source_dir": join(TEST_DIR, "mbed", "call_before_main"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
},
{
"id": "MBED_A22", "description": "SPIFI for LPC4088 (test 1)",
"source_dir": join(TEST_DIR, "mbed", "spifi1"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"duration": 30,
"mcu": ["LPC4088","LPC4088_DM"]
},
{
"id": "MBED_A23", "description": "SPIFI for LPC4088 (test 2)",
"source_dir": join(TEST_DIR, "mbed", "spifi2"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"duration": 30,
"mcu": ["LPC4088","LPC4088_DM"]
},
{
"id": "MBED_A24", "description": "Serial echo with RTS/CTS flow control",
"source_dir": join(TEST_DIR, "mbed", "echo_flow_control"),
"dependencies": [MBED_LIBRARIES],
"automated": "True",
"host_test": "echo_flow_control",
"mcu": ["LPC1768"],
"peripherals": ["extra_serial"]
},
{
"id": "MBED_A25", "description": "I2C EEPROM line read/write test",
"source_dir": join(TEST_DIR, "mbed", "i2c_eeprom_line"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"peripherals": ["24LC256"],
"automated": True,
"duration": 10,
},
{
"id": "MBED_A26", "description": "AnalogIn potentiometer test",
"source_dir": join(TEST_DIR, "mbed", "analog_pot"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"peripherals": ["analog_pot"],
"automated": True,
"duration": 10,
},
{
"id": "MBED_A27", "description": "CAN loopback test",
"source_dir": join(TEST_DIR, "mbed", "can_loopback"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"duration": 20,
"peripherals": ["can_transceiver"],
"mcu": ["LPC1549", "LPC1768"],
},
{
"id": "MBED_BLINKY", "description": "Blinky",
"source_dir": join(TEST_DIR, "mbed", "blinky"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": False,
},
{
"id": "MBED_BUS", "description": "Blinky BUS",
"source_dir": join(TEST_DIR, "mbed", "bus"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": False,
"duration": 15,
},
{
"id": "MBED_BUSOUT", "description": "BusOut",
"source_dir": join(TEST_DIR, "mbed", "bus_out"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"duration": 15,
},
# Size benchmarks
{
"id": "BENCHMARK_1", "description": "Size (c environment)",
"source_dir": join(BENCHMARKS_DIR, "cenv"),
"dependencies": [MBED_LIBRARIES]
},
{
"id": "BENCHMARK_2", "description": "Size (float math)",
"source_dir": join(BENCHMARKS_DIR, "float_math"),
"dependencies": [MBED_LIBRARIES]
},
{
"id": "BENCHMARK_3", "description": "Size (printf)",
"source_dir": join(BENCHMARKS_DIR, "printf"),
"dependencies": [MBED_LIBRARIES]
},
{
"id": "BENCHMARK_4", "description": "Size (mbed libs)",
"source_dir": join(BENCHMARKS_DIR, "mbed"),
"dependencies": [MBED_LIBRARIES]
},
{
"id": "BENCHMARK_5", "description": "Size (all)",
"source_dir": join(BENCHMARKS_DIR, "all"),
"dependencies": [MBED_LIBRARIES]
},
# performance related tests
{
"id": "PERF_1", "description": "SD Stdio R/W Speed",
"source_dir": join(TEST_DIR, "mbed", "sd_perf_stdio"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, FS_LIBRARY],
"automated": True,
"duration": 15,
"peripherals": ["SD"]
},
{
"id": "PERF_2", "description": "SD FileHandle R/W Speed",
"source_dir": join(TEST_DIR, "mbed", "sd_perf_fhandle"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, FS_LIBRARY],
"automated": True,
"duration": 15,
"peripherals": ["SD"]
},
{
"id": "PERF_3", "description": "SD FatFS R/W Speed",
"source_dir": join(TEST_DIR, "mbed", "sd_perf_fatfs"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, FS_LIBRARY],
"automated": True,
"duration": 15,
"peripherals": ["SD"]
},
# Not automated MBED tests
{
"id": "MBED_1", "description": "I2C SRF08",
"source_dir": join(TEST_DIR, "mbed", "i2c_SRF08"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, join(PERIPHERALS, 'SRF08')],
"peripherals": ["SRF08"]
},
{
"id": "MBED_2", "description": "stdio",
"source_dir": join(TEST_DIR, "mbed", "stdio"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"duration": 20,
"automated": True,
#"host_test": "stdio_auto"
},
{
"id": "MBED_3", "description": "PortOut",
"source_dir": join(TEST_DIR, "mbed", "portout"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_4", "description": "Sleep",
"source_dir": join(TEST_DIR, "mbed", "sleep"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"duration": 30,
"mcu": ["LPC1768", "LPC11U24", "LPC4088","LPC4088_DM","NRF51822", "LPC11U68"]
},
{
"id": "MBED_5", "description": "PWM",
"source_dir": join(TEST_DIR, "mbed", "pwm"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
},
{
"id": "MBED_6", "description": "SW Reset",
"source_dir": join(TEST_DIR, "mbed", "reset"),
"dependencies": [MBED_LIBRARIES],
"duration": 15
},
{
"id": "MBED_7", "description": "stdio benchmark",
"source_dir": join(TEST_DIR, "mbed", "stdio_benchmark"),
"dependencies": [MBED_LIBRARIES],
"duration": 40
},
{
"id": "MBED_8", "description": "SPI",
"source_dir": join(TEST_DIR, "mbed", "spi"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_9", "description": "Sleep Timeout",
"source_dir": join(TEST_DIR, "mbed", "sleep_timeout"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_10", "description": "Hello World",
"source_dir": join(TEST_DIR, "mbed", "hello"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
#"host_test": "hello_auto",
},
{
"id": "MBED_11", "description": "Ticker Int",
"source_dir": join(TEST_DIR, "mbed", "ticker"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
#"host_test": "wait_us_auto",
"duration": 20,
},
{
"id": "MBED_12", "description": "C++",
"source_dir": join(TEST_DIR, "mbed", "cpp"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True
},
{
"id": "MBED_13", "description": "Heap & Stack",
"source_dir": join(TEST_DIR, "mbed", "heap_and_stack"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
},
{
"id": "MBED_14", "description": "Serial Interrupt",
"source_dir": join(TEST_DIR, "mbed", "serial_interrupt"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
},
{
"id": "MBED_15", "description": "RPC",
"source_dir": join(TEST_DIR, "mbed", "rpc"),
"dependencies": [MBED_LIBRARIES, join(LIB_DIR, "rpc"), TEST_MBED_LIB],
"automated": False,
"mcu": ["LPC1768"]
},
{
"id": "MBED_16", "description": "RTC",
"source_dir": join(TEST_DIR, "mbed", "rtc"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"exclude_mcu": ["NRF51822", "NRF51822_BOOT", "NRF51822_OTA", "NRF51822_Y5_MBUG",
"NRF51_DK", "NRF51_DK_BOOT", "NRF51_DK_OTA",
"NRF51_MICROBIT", "NRF51_MICROBIT_B", "NRF51_MICROBIT_BOOT",
"NRF51_MICROBIT_B_BOOT", "NRF51_MICROBIT_B_OTA", "NRF51_MICROBIT_OTA",
"HRM1017", "HRM1017_BOOT", "HRM1701_OTA",
"TY51822R3", "TY51822R3_BOOT", "TY51822R3_OTA",
"NRF15_DONGLE", "NRF15_DONGLE_BOOT", "NRF15_DONGLE_OTA",
"ARCH_BLE", "ARCH_BLE_BOOT", "ARCH_BLE_OTA",
"ARCH_LINK", "ARCH_LINK_BOOT", "ARCH_LINK_OTA",
"RBLAB_BLENANO", "RBLAB_BLENANO_BOOT", "RBLAB_BLENANO_OTA",
"RBLAB_NRF51822", "RBLAB_NRF51822_BOOT", "RBLAB_NRF51822_OTA",
"SEEED_TINY_BLE", "SEEED_TINY_BLE_BOOT", "SEEED_TINY_BLE_OTA",
"WALLBOT_BLE", "WALLBOT_BLE_BOOT", "WALLBOT_BLE_OTA",
"DELTA_DFCM_NNN40", "DELTA_DFCM_NNN40_BOOT", "DELTA_DFCM_NNN40_OTA",
"LPC1114"],
#"host_test": "rtc_auto",
"duration": 15
},
{
"id": "MBED_17", "description": "Serial Interrupt 2",
"source_dir": join(TEST_DIR, "mbed", "serial_interrupt_2"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_18", "description": "Local FS Directory",
"source_dir": join(TEST_DIR, "mbed", "dir"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_19", "description": "SD FS Directory",
"source_dir": join(TEST_DIR, "mbed", "dir_sd"),
"dependencies": [MBED_LIBRARIES, FS_LIBRARY],
"peripherals": ["SD"]
},
{
"id": "MBED_20", "description": "InterruptIn 2",
"source_dir": join(TEST_DIR, "mbed", "interruptin_2"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_21", "description": "freopen Stream",
"source_dir": join(TEST_DIR, "mbed", "freopen"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_22", "description": "Semihost",
"source_dir": join(TEST_DIR, "mbed", "semihost"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"mcu": ["LPC1768", "LPC2368", "LPC11U24"]
},
{
"id": "MBED_23", "description": "Ticker Int us",
"source_dir": join(TEST_DIR, "mbed", "ticker_2"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"duration": 15,
"automated": True,
#"host_test": "wait_us_auto"
},
{
"id": "MBED_24", "description": "Timeout Int us",
"source_dir": join(TEST_DIR, "mbed", "timeout"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"duration": 15,
"automated": True,
#"host_test": "wait_us_auto"
},
{
"id": "MBED_25", "description": "Time us",
"source_dir": join(TEST_DIR, "mbed", "time_us"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"duration": 15,
"automated": True,
#"host_test": "wait_us_auto"
},
{
"id": "MBED_26", "description": "Integer constant division",
"source_dir": join(TEST_DIR, "mbed", "div"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
},
{
"id": "MBED_27", "description": "SPI ADXL345",
"source_dir": join(TEST_DIR, "mbed", "spi_ADXL345"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, join(PERIPHERALS, 'ADXL345')],
"peripherals": ["ADXL345"]
},
{
"id": "MBED_28", "description": "Interrupt chaining (InterruptManager)",
"source_dir": join(TEST_DIR, "mbed", "interrupt_chaining"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
},
{
"id": "MBED_29", "description": "CAN network test",
"source_dir": join(TEST_DIR, "mbed", "can"),
"dependencies": [MBED_LIBRARIES],
"mcu": ["LPC1768", "LPC4088", "LPC1549", "RZ_A1H"]
},
{
"id": "MBED_30", "description": "CAN network test using interrupts",
"source_dir": join(TEST_DIR, "mbed", "can_interrupt"),
"dependencies": [MBED_LIBRARIES],
"mcu": ["LPC1768", "LPC4088", "LPC1549", "RZ_A1H"]
},
{
"id": "MBED_31", "description": "PWM LED test",
"source_dir": join(TEST_DIR, "mbed", "pwm_led"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_32", "description": "Pin toggling",
"source_dir": join(TEST_DIR, "mbed", "pin_toggling"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_33", "description": "C string operations",
"source_dir": join(TEST_DIR, "mbed", "cstring"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"duration": 10,
"automated": False,
},
{
"id": "MBED_34", "description": "Ticker Two callbacks",
"source_dir": join(TEST_DIR, "mbed", "ticker_3"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"duration": 15,
"automated": True,
#"host_test": "wait_us_auto"
},
{
"id": "MBED_35", "description": "SPI C12832 display",
"source_dir": join(TEST_DIR, "mbed", "spi_C12832"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, join(PERIPHERALS, 'C12832')],
"peripherals": ["C12832"],
"automated": True,
"duration": 10,
},
{
"id": "MBED_36", "description": "WFI correct behavior",
"source_dir": join(TEST_DIR, "mbed", "wfi"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": False
},
{
"id": "MBED_37", "description": "Serial NC RX",
"source_dir": join(TEST_DIR, "mbed", "serial_nc_rx"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True
},
{
"id": "MBED_38", "description": "Serial NC TX",
"source_dir": join(TEST_DIR, "mbed", "serial_nc_tx"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True
},
# CMSIS RTOS tests
{
"id": "CMSIS_RTOS_1", "description": "Basic",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "basic"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
},
{
"id": "CMSIS_RTOS_2", "description": "Mutex",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "mutex"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
"duration": 20
},
{
"id": "CMSIS_RTOS_3", "description": "Semaphore",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "semaphore"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
"duration": 20
},
{
"id": "CMSIS_RTOS_4", "description": "Signals",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "signals"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
},
{
"id": "CMSIS_RTOS_5", "description": "Queue",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "queue"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
"duration": 20
},
{
"id": "CMSIS_RTOS_6", "description": "Mail",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "mail"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
"duration": 20
},
{
"id": "CMSIS_RTOS_7", "description": "Timer",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "timer"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
},
{
"id": "CMSIS_RTOS_8", "description": "ISR",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "isr"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
},
# mbed RTOS tests
{
"id": "RTOS_1", "description": "Basic thread",
"source_dir": join(TEST_DIR, "rtos", "mbed", "basic"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"duration": 15,
"automated": True,
#"host_test": "wait_us_auto",
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K64F", "KL46Z",
"RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE", "DISCO_F469NI", "NUCLEO_F410RB",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F030R8", "NUCLEO_F070RB",
"NUCLEO_L053R8", "DISCO_L053C8", "NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "NUCLEO_F303K8", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE", "NUCLEO_F446RE", "NUCLEO_F103RB", "DISCO_F746NG", "MOTE_L152RC", "B96B_F446VE"],
},
{
"id": "RTOS_2", "description": "Mutex resource lock",
"source_dir": join(TEST_DIR, "rtos", "mbed", "mutex"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"duration": 20,
"automated": True,
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K64F", "KL46Z",
"RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE", "DISCO_F469NI", "NUCLEO_F410RB",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F030R8", "NUCLEO_F070RB",
"NUCLEO_L053R8", "DISCO_L053C8", "NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "NUCLEO_F303K8", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE", "NUCLEO_F446RE", "NUCLEO_F103RB", "DISCO_F746NG", "MOTE_L152RC", "B96B_F446VE"],
},
{
"id": "RTOS_3", "description": "Semaphore resource lock",
"source_dir": join(TEST_DIR, "rtos", "mbed", "semaphore"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"duration": 20,
"automated": True,
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K64F", "KL46Z",
"RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE", "DISCO_F469NI", "NUCLEO_F410RB",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F030R8", "NUCLEO_F070RB",
"NUCLEO_L053R8", "DISCO_L053C8", "NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "NUCLEO_F303K8", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE", "NUCLEO_F446RE", "NUCLEO_F103RB", "DISCO_F746NG", "MOTE_L152RC", "B96B_F446VE"],
},
{
"id": "RTOS_4", "description": "Signals messaging",
"source_dir": join(TEST_DIR, "rtos", "mbed", "signals"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K64F", "KL46Z",
"RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE", "DISCO_F469NI", "NUCLEO_F410RB",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F030R8", "NUCLEO_F070RB",
"NUCLEO_L053R8", "DISCO_L053C8", "NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "NUCLEO_F303K8", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE", "NUCLEO_F446RE", "NUCLEO_F103RB", "DISCO_F746NG", "MOTE_L152RC", "B96B_F446VE"],
},
{
"id": "RTOS_5", "description": "Queue messaging",
"source_dir": join(TEST_DIR, "rtos", "mbed", "queue"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K64F", "KL46Z",
"RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE", "DISCO_F469NI", "NUCLEO_F410RB",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F030R8", "NUCLEO_F070RB",
"NUCLEO_L053R8", "DISCO_L053C8", "NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "NUCLEO_F303K8", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE", "NUCLEO_F446RE", "NUCLEO_F103RB", "DISCO_F746NG", "MOTE_L152RC", "B96B_F446VE"],
},
{
"id": "RTOS_6", "description": "Mail messaging",
"source_dir": join(TEST_DIR, "rtos", "mbed", "mail"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K64F", "KL46Z",
"RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE", "DISCO_F469NI", "NUCLEO_F410RB",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F030R8", "NUCLEO_F070RB",
"NUCLEO_L053R8", "DISCO_L053C8", "NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "NUCLEO_F303K8", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE", "NUCLEO_F446RE", "NUCLEO_F103RB", "DISCO_F746NG", "MOTE_L152RC", "B96B_F446VE"],
},
{
"id": "RTOS_7", "description": "Timer",
"source_dir": join(TEST_DIR, "rtos", "mbed", "timer"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"duration": 15,
"automated": True,
#"host_test": "wait_us_auto",
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K64F", "KL46Z",
"RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE", "DISCO_F469NI", "NUCLEO_F410RB",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F030R8", "NUCLEO_F070RB",
"NUCLEO_L053R8", "DISCO_L053C8", "NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "NUCLEO_F303K8", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE", "NUCLEO_F446RE", "NUCLEO_F103RB", "DISCO_F746NG", "MOTE_L152RC", "B96B_F446VE"],
},
{
"id": "RTOS_8", "description": "ISR (Queue)",
"source_dir": join(TEST_DIR, "rtos", "mbed", "isr"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K64F", "KL46Z",
"RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE", "DISCO_F469NI", "NUCLEO_F410RB",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F030R8", "NUCLEO_F070RB",
"NUCLEO_L053R8", "DISCO_L053C8", "NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "NUCLEO_F303K8", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE", "NUCLEO_F446RE", "NUCLEO_F103RB", "DISCO_F746NG", "MOTE_L152RC", "B96B_F446VE"],
},
{
"id": "RTOS_9", "description": "SD File write-read",
"source_dir": join(TEST_DIR, "rtos", "mbed", "file"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB, FS_LIBRARY],
"automated": True,
"peripherals": ["SD"],
"mcu": ["LPC1768", "LPC11U24", "LPC812", "KL25Z",
"KL05Z", "K64F", "KL46Z", "RZ_A1H",
"DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE", "NUCLEO_F401RE", "NUCLEO_F410RB", "DISCO_F469NI"],
},
# Networking Tests
{
"id": "NET_1", "description": "TCP client hello world",
"source_dir": join(TEST_DIR, "net", "helloworld", "tcpclient"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"duration": 15,
"automated": True,
"peripherals": ["ethernet"],
},
{
"id": "NET_2", "description": "NIST Internet Time Service",
"source_dir": join(TEST_DIR, "net", "helloworld", "udpclient"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"duration": 15,
"automated": True,
"peripherals": ["ethernet"],
},
{
"id": "NET_3", "description": "TCP echo server",
"source_dir": join(TEST_DIR, "net", "echo", "tcp_server"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"automated": True,
#"host_test" : "tcpecho_server_auto",
"peripherals": ["ethernet"],
},
{
"id": "NET_4", "description": "TCP echo client",
"source_dir": join(TEST_DIR, "net", "echo", "tcp_client"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"automated": True,
#"host_test": "tcpecho_client_auto",
"peripherals": ["ethernet"]
},
{
"id": "NET_5", "description": "UDP echo server",
"source_dir": join(TEST_DIR, "net", "echo", "udp_server"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"automated": True,
#"host_test" : "udpecho_server_auto",
"peripherals": ["ethernet"]
},
{
"id": "NET_6", "description": "UDP echo client",
"source_dir": join(TEST_DIR, "net", "echo", "udp_client"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"automated": True,
#"host_test" : "udpecho_client_auto",
"peripherals": ["ethernet"],
},
{
"id": "NET_7", "description": "HTTP client hello world",
"source_dir": join(TEST_DIR, "net", "protocols", "HTTPClient_HelloWorld"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"automated": True,
"duration": 15,
"peripherals": ["ethernet"],
},
{
"id": "NET_8", "description": "NTP client",
"source_dir": join(TEST_DIR, "net", "protocols", "NTPClient_HelloWorld"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"automated": True,
"peripherals": ["ethernet"],
},
{
"id": "NET_9", "description": "Multicast Send",
"source_dir": join(TEST_DIR, "net", "helloworld", "multicast_send"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY],
"peripherals": ["ethernet"],
},
{
"id": "NET_10", "description": "Multicast Receive",
"source_dir": join(TEST_DIR, "net", "helloworld", "multicast_receive"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY],
"peripherals": ["ethernet"],
},
{
"id": "NET_11", "description": "Broadcast Send",
"source_dir": join(TEST_DIR, "net", "helloworld", "broadcast_send"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY],
"peripherals": ["ethernet"],
},
{
"id": "NET_12", "description": "Broadcast Receive",
"source_dir": join(TEST_DIR, "net", "helloworld", "broadcast_receive"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY],
"peripherals": ["ethernet"],
},
{
"id": "NET_13", "description": "TCP client echo loop",
"source_dir": join(TEST_DIR, "net", "echo", "tcp_client_loop"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"automated": True,
"duration": 15,
#"host_test": "tcpecho_client_auto",
"peripherals": ["ethernet"],
},
{
"id": "NET_14", "description": "UDP PHY/Data link layer",
"source_dir": join(TEST_DIR, "net", "echo", "udp_link_layer"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY],
"automated": False,
"duration": 20,
"host_test": "udp_link_layer_auto",
"peripherals": ["ethernet"],
},
# u-blox tests
{
"id": "UB_1", "description": "u-blox USB modem: HTTP client",
"source_dir": [join(TEST_DIR, "net", "cellular", "http", "ubloxusb"), join(TEST_DIR, "net", "cellular", "http", "common")],
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, USB_HOST_LIBRARIES, UBLOX_LIBRARY],
"supported": CORTEX_ARM_SUPPORT,
},
{
"id": "UB_2", "description": "u-blox USB modem: SMS test",
"source_dir": [join(TEST_DIR, "net", "cellular", "sms", "ubloxusb"), join(TEST_DIR, "net", "cellular", "sms", "common")],
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, USB_HOST_LIBRARIES, UBLOX_LIBRARY],
"supported": CORTEX_ARM_SUPPORT,
},
# USB Tests
{
"id": "USB_1", "description": "Mouse",
"source_dir": join(TEST_DIR, "usb", "device", "basic"),
"dependencies": [MBED_LIBRARIES, USB_LIBRARIES],
},
{
"id": "USB_2", "description": "Keyboard",
"source_dir": join(TEST_DIR, "usb", "device", "keyboard"),
"dependencies": [MBED_LIBRARIES, USB_LIBRARIES],
},
{
"id": "USB_3", "description": "Mouse_Keyboard",
"source_dir": join(TEST_DIR, "usb", "device", "keyboard"),
"dependencies": [MBED_LIBRARIES, USB_LIBRARIES],
},
{
"id": "USB_4", "description": "Serial Port",
"source_dir": join(TEST_DIR, "usb", "device", "serial"),
"dependencies": [MBED_LIBRARIES, USB_LIBRARIES],
"supported": CORTEX_ARM_SUPPORT,
},
{
"id": "USB_5", "description": "Generic HID",
"source_dir": join(TEST_DIR, "usb", "device", "raw_hid"),
"dependencies": [MBED_LIBRARIES, USB_LIBRARIES],
},
{
"id": "USB_6", "description": "MIDI",
"source_dir": join(TEST_DIR, "usb", "device", "midi"),
"dependencies": [MBED_LIBRARIES, USB_LIBRARIES],
},
{
"id": "USB_7", "description": "AUDIO",
"source_dir": join(TEST_DIR, "usb", "device", "audio"),
"dependencies": [MBED_LIBRARIES, USB_LIBRARIES],
},
# CMSIS DSP
{
"id": "CMSIS_DSP_1", "description": "FIR",
"source_dir": join(TEST_DIR, "dsp", "cmsis", "fir_f32"),
"dependencies": [MBED_LIBRARIES, DSP_LIBRARIES],
},
# mbed DSP
{
"id": "DSP_1", "description": "FIR",
"source_dir": join(TEST_DIR, "dsp", "mbed", "fir_f32"),
"dependencies": [MBED_LIBRARIES, DSP_LIBRARIES],
},
# KL25Z
{
"id": "KL25Z_1", "description": "LPTMR",
"source_dir": join(TEST_DIR, "KL25Z", "lptmr"),
"dependencies": [MBED_LIBRARIES],
"supported": CORTEX_ARM_SUPPORT,
"mcu": ["KL25Z"],
},
{
"id": "KL25Z_2", "description": "PIT",
"source_dir": join(TEST_DIR, "KL25Z", "pit"),
"dependencies": [MBED_LIBRARIES],
"supported": CORTEX_ARM_SUPPORT,
"mcu": ["KL25Z"],
},
{
"id": "KL25Z_3", "description": "TSI Touch Sensor",
"source_dir": join(TEST_DIR, "mbed", "tsi"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, join(PERIPHERALS, 'TSI')],
"mcu": ["KL25Z"],
},
{
"id": "KL25Z_4", "description": "RTC",
"source_dir": join(TEST_DIR, "KL25Z", "rtc"),
"dependencies": [MBED_LIBRARIES],
"mcu": ["KL25Z"],
},
{
"id": "KL25Z_5", "description": "MMA8451Q accelerometer",
"source_dir": join(TEST_DIR, "mbed", "i2c_MMA8451Q"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, join(PERIPHERALS, 'MMA8451Q')],
"mcu": ["KL25Z", "KL05Z", "KL46Z", "K20D50M"],
"automated": True,
"duration": 15,
},
# Examples
{
"id": "EXAMPLE_1", "description": "/dev/null",
"source_dir": join(TEST_DIR, "mbed", "dev_null"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
#"host_test" : "dev_null_auto",
},
{
"id": "EXAMPLE_2", "description": "FS + RTOS",
"source_dir": join(TEST_DIR, "mbed", "fs"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB, FS_LIBRARY],
},
# CPPUTEST Library provides Unit testing Framework
#
# To write TESTs and TEST_GROUPs please add CPPUTEST_LIBRARY to 'dependencies'
#
# This will also include:
# 1. test runner - main function with call to CommandLineTestRunner::RunAllTests(ac, av)
# 2. Serial console object to print test result on serial port console
#
# Unit testing with cpputest library
{
"id": "UT_1", "description": "Basic",
"source_dir": join(TEST_DIR, "utest", "basic"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
},
{
"id": "UT_2", "description": "Semihost file system",
"source_dir": join(TEST_DIR, "utest", "semihost_fs"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
"mcu": ["LPC1768", "LPC2368", "LPC11U24"]
},
{
"id": "UT_3", "description": "General tests",
"source_dir": join(TEST_DIR, "utest", "general"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
},
{
"id": "UT_BUSIO", "description": "BusIn BusOut",
"source_dir": join(TEST_DIR, "utest", "bus"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
},
{
"id": "UT_I2C_EEPROM_ASYNCH", "description": "I2C Asynch eeprom",
"source_dir": join(TEST_DIR, "utest", "i2c_eeprom_asynch"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
},
{
"id": "UT_SERIAL_ASYNCH", "description": "Asynch serial test (req 2 serial peripherals)",
"source_dir": join(TEST_DIR, "utest", "serial_asynch"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
},
{
"id": "UT_SPI_ASYNCH", "description": "Asynch spi test",
"source_dir": join(TEST_DIR, "utest", "spi_asynch"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
},
{
"id": "UT_LP_TICKER", "description": "Low power ticker test",
"source_dir": join(TEST_DIR, "utest", "lp_ticker"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
},
# Tests used for target information purposes
{
"id": "DTCT_1", "description": "Simple detect test",
"source_dir": join(TEST_DIR, "mbed", "detect"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
#"host_test" : "detect_auto",
},
]
# Group tests with the same goals into categories
GROUPS = {
"core": ["MBED_A1", "MBED_A2", "MBED_A3", "MBED_A18"],
"digital_io": ["MBED_A5", "MBED_A6", "MBED_A7", "MBED_A10", "MBED_A11"],
"analog_io": ["MBED_A8"],
"i2c": ["MBED_A19", "MBED_A20"],
"spi": ["MBED_A12"],
}
GROUPS["rtos"] = [test["id"] for test in TESTS if test["id"].startswith("RTOS_")]
GROUPS["net"] = [test["id"] for test in TESTS if test["id"].startswith("NET_")]
GROUPS["automated"] = [test["id"] for test in TESTS if test.get("automated", False)]
# Look for 'TEST_GROUPS' in private_settings.py and update the GROUPS dictionary
# with the information in test_groups if found
try:
from workspace_tools.private_settings import TEST_GROUPS
except:
TEST_GROUPS = {}
GROUPS.update(TEST_GROUPS)
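
# Illustrative private_settings.py override (an assumption about its format,
# inferred from the dictionary update above):
#
#   TEST_GROUPS = {"smoke": ["MBED_A1", "MBED_10", "RTOS_1"]}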
class Test:
DEFAULTS = {
#'mcu': None,
'description': None,
'dependencies': None,
'duration': 10,
'host_test': 'host_test',
'automated': False,
'peripherals': None,
#'supported': None,
'source_dir': None,
'extra_files': None
}
def __init__(self, n):
self.n = n
self.__dict__.update(Test.DEFAULTS)
self.__dict__.update(TESTS[n])
def is_supported(self, target, toolchain):
if hasattr(self, 'mcu') and not target in self.mcu:
return False
if hasattr(self, 'exclude_mcu') and target in self.exclude_mcu:
return False
if not hasattr(self, 'supported'):
return True
return (target in self.supported) and (toolchain in self.supported[target])
def get_description(self):
if self.description:
return self.description
else:
return self.id
def __cmp__(self, other):
return cmp(self.n, other.n)
def __str__(self):
return "[%3d] %s: %s" % (self.n, self.id, self.get_description())
def __getitem__(self, key):
if key == "id": return self.id
elif key == "mcu": return self.mcu
elif key == "exclude_mcu": return self.exclude_mcu
elif key == "dependencies": return self.dependencies
elif key == "description": return self.description
elif key == "duration": return self.duration
elif key == "host_test": return self.host_test
elif key == "automated": return self.automated
elif key == "peripherals": return self.peripherals
elif key == "supported": return self.supported
elif key == "source_dir": return self.source_dir
elif key == "extra_files": return self.extra_files
else:
return None
TEST_MAP = dict([(test['id'], Test(i)) for i, test in enumerate(TESTS)])
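
# Illustrative usage (not part of the original file): look up a test definition
# and check whether it is supported on a given target/toolchain pair.
#
#   test = TEST_MAP["MBED_A1"]
#   if test.is_supported("LPC1768", "ARM"):
#       print(test)    # -> "[  0] MBED_A1: Basic"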
|
py | b400f6af9539236a21c754d90f559ed6ab5f6ddd | import re
# based on https://www.tutorialspoint.com/roman-to-integer-in-python
class DeRomanizer(object):
def __init__(self):
self.romans = {'I':1,'V':5,'X':10,'L':50,'C':100,'D':500,'M':1000,'IV':4,'IX':9,'XL':40,'XC':90,'CD':400,'CM':900}
def convert_word(self, word):
if not re.match(r"^[I|V|X|L|C|D|M]+$", word, flags=re.IGNORECASE):
return word
i = 0
num = 0
word = word.upper()
while i < len(word):
if i+1<len(word) and word[i:i+2] in self.romans:
num+=self.romans[word[i:i+2]]
i+=2
else:
num+=self.romans[word[i]]
i+=1
return str(num)
def convert_all(self, field):
converted = [self.convert_word(word) for word in field.split()]
return ' '.join(converted)
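

if __name__ == "__main__":
    # Quick illustrative check (not part of the original snippet).
    dr = DeRomanizer()
    print(dr.convert_word("XIV"))                   # -> 14
    print(dr.convert_all("Chapter IV of Book II"))  # -> Chapter 4 of Book 2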
|
py | b400fa7b49422dbf6be3ca81b28286690aa3a40a | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import json
import os
import uuid
from datetime import datetime
from bkbase.dataflow.one_model.api.deeplearning_api import delete_path
from bkbase.dataflow.one_model.topo.deeplearning_node import Builder, Node
from bkbase.dataflow.one_model.utils import deeplearning_logger
from bkbase.dataflow.one_model.utils.deeplearning_constant import (
IcebergReservedField,
TableType,
)
from bkbase.dataflow.one_model.utils.input_window_analyzer import (
BatchTimeDelta,
BatchTimeStamp,
)
from bkbase.dataflow.one_model.utils.periodic_time_helper import PeriodicTimeHelper
from bkdata_datalake import tables
from jpype.types import JDouble, JInt, JLong, JString
class SinkNode(Node):
output = None
dt_event_timestamp = 0
def __init__(self, builder):
super().__init__(builder)
self.output = builder.output
self.dt_event_timestamp = builder.dt_event_timestamp
class ModelSinkNode(SinkNode):
table_type = None
def __init__(self, builder):
super().__init__(builder)
self.table_type = builder.table_type
def create_sink(self, model):
tf_config = json.loads(os.environ["TF_CONFIG"])
index = tf_config["task"]["index"]
path = self.output["path"]
if path.endswith("/"):
path = path[0 : len(path) - 1]
if index > 0:
path = "{}_{}".format(path, uuid.uuid4().hex[0:8])
# Note: every worker must call save() at the same time, because saving a distributed
# model involves aggregation between workers; if any worker skips it, the whole save blocks.
# At the same time, only the chief node (index == 0) may write to the target directory;
# the other workers save to temporary paths, which are cleaned up once the save completes.
deeplearning_logger.info("begin to save:{}".format(path))
model.save(path)
deeplearning_logger.info("save successfully")
if index > 0:
deeplearning_logger.info("delete extra paths...")
self.delete_extra_path(path)
def delete_extra_path(self, path):
hdfs_cluster = self.output["cluster_group"]
user = self.output["hdfs_user"]
component_url = self.output["component_url"]
# Extract the path portion from the full URI
index_array = [i for i, ltr in enumerate(path) if ltr == "/"]
path = path[index_array[2] :]
if path.startswith("//"):
path = path[1:]
deeplearning_logger.info("component url:" + component_url)
deeplearning_logger.info("hdfs cluster:" + hdfs_cluster)
deeplearning_logger.info("delete path:" + path)
delete_path(component_url, hdfs_cluster, [path], True, user)
# logger.info(clean_data)
class ModelIcebergQuerySetSinkNode(SinkNode):
def __init__(self, builder):
super().__init__(builder)
self.iceberg_conf = builder.iceberg_conf
def create_sink(self, dataset):
iceberg_table_name = self.output["iceberg_table_name"]
table_info = iceberg_table_name.split(".")
table = tables.load_table(table_info[0], table_info[1], self.iceberg_conf)
iterator = dataset.as_numpy_iterator()
column_names = [field_item["field_name"] for field_item in self.fields]
for item in iterator:
tables.append_table(table, column_names, [item], {})
class ModelIcebergResultSetSinkNode(SinkNode):
def __init__(self, builder):
super().__init__(builder)
self.iceberg_conf = builder.iceberg_conf
def create_sink(self, dataset):
tf_config = json.loads(os.environ["TF_CONFIG"])
index = tf_config["task"]["index"]
if index > 0:
return
# Only the chief worker (index == 0) persists the results
sink_dt_event_time = self.output["dt_event_timestamp"]
# Build the reserved (platform) fields
data_time = datetime.fromtimestamp(sink_dt_event_time / 1000)
the_date = data_time.strftime("%Y%m%d")
dt_event_time = data_time.strftime("%Y-%m-%d %H:00:00")
local_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
iceberg_table_name = self.output["iceberg_table_name"]
table_info = iceberg_table_name.split(".")
table = tables.load_table(table_info[0], table_info[1], self.iceberg_conf)
iterator = dataset.as_numpy_iterator()
column_names = []
for field_item in self.fields:
if field_item["field"] not in ["_startTime_", "_endTime_"]:
column_names.append(field_item["field"].lower())
column_names.append(IcebergReservedField.start_time.value.field_name)
column_names.append(IcebergReservedField.end_time.value.field_name)
column_names.append(IcebergReservedField.thedate.value.field_name)
column_names.append(IcebergReservedField.dteventtime.value.field_name)
column_names.append(IcebergReservedField.dteventtimestamp.value.field_name)
column_names.append(IcebergReservedField.localtime.value.field_name)
table_records = []
for item_array in iterator:
merge_feature_label_list = []
for item in item_array:
merge_feature_label_list.extend(item.tolist())
final_result_list = []
final_result_list.extend(
self.convert_target_type(column_names[0 : len(column_names) - 6], merge_feature_label_list)
)
# Append the reserved fields
final_result_list.extend(
[
JString(str(self.output["time_range_list"][0]["start_time"])),
JString(str(self.output["time_range_list"][0]["end_time"])),
]
)
final_result_list.extend([JInt(the_date), dt_event_time, JLong(sink_dt_event_time), local_time])
table_records.append(final_result_list)
tables.append_table(table, column_names, table_records, {})
def convert_target_type(self, column_names, origin_list):
# Type conversion: values coming out of the dataset are all float and must be cast back to each column's declared type
field_map = {field["field"].lower(): field for field in self.fields}
new_value_list = []
for column_index in range(0, len(column_names)):
column_name = column_names[column_index]
column_value = origin_list[column_index]
column_type = field_map[column_name]["type"]
if column_type == "long":
new_value_list.append(JLong(column_value))
elif column_type == "int":
new_value_list.append(JInt(column_value))
elif column_type == "string":
new_value_list.append(JString(column_value))
else:
new_value_list.append(JDouble(column_value))
return new_value_list
class SinkNodeBuilder(Builder):
output = None
dt_event_timestamp = 0
def __init__(self, info):
super().__init__(info)
self.init_builder(info)
def init_builder(self, info):
pass
class ModelSinkNodeBuilder(SinkNodeBuilder):
table_type = None
def __init__(self, info):
super().__init__(info)
def init_builder(self, info):
output_info = info["output"]
self.table_type = output_info.get("table_type", TableType.OTHER.value)
path = output_info["path"]
self.output = {
"path": path,
"format": output_info["format"],
"mode": info["output_mode"],
"cluster_group": output_info["cluster_group"],
"hdfs_user": output_info["hdfs_user"],
"component_url": output_info["component_url"],
}
def build(self):
return ModelSinkNode(self)
class ModelIcebergQuerySetSinkNodeBuilder(SinkNodeBuilder):
iceberg_conf = None
def __init__(self, info):
super().__init__(info)
def init_builder(self, info):
self.iceberg_conf = info["iceberg_conf"]
iceberg_table_name = info["iceberg_table_name"]
output_mode = info["output_mode"]
self.output = {"iceberg_table_name": iceberg_table_name, "mode": output_mode}
def build(self):
return ModelIcebergQuerySetSinkNode(self)
class ModelIcebergResultSetSinkNodeBuilder(SinkNodeBuilder):
iceberg_conf = None
def __init__(self, info):
super().__init__(info)
def init_builder(self, info):
self.iceberg_conf = info["iceberg_conf"]
iceberg_table_name = info["iceberg_table_name"]
self.dt_event_timestamp = info["dt_event_time"]
end_time = self.dt_event_timestamp + 3600 * 1000
self.output = {
"iceberg_table_name": iceberg_table_name,
"mode": "overwrite",
"dt_event_timestamp": self.dt_event_timestamp,
"time_range_list": [{"start_time": self.dt_event_timestamp, "end_time": end_time}],
}
def build(self):
return ModelIcebergResultSetSinkNode(self)
class ModelIcebergResultSetSinkNodeBuilderV2(SinkNodeBuilder):
iceberg_conf = None
def __init__(self, info):
super().__init__(info)
def init_builder(self, info):
self.iceberg_conf = info["storage_conf"]["storekit_hdfs_conf"]
iceberg_table_name = info["storage_conf"]["storekit_hdfs_conf"]["physical_table_name"]
schedule_time_in_hour = PeriodicTimeHelper.round_schedule_timestamp_to_hour(info["schedule_time"])
schedule_time_obj = BatchTimeStamp(schedule_time_in_hour)
data_offset_obj = BatchTimeDelta()
data_offset_obj.init_delta_from_string(info["data_time_offset"])
self.dt_event_timestamp = schedule_time_obj.minus(data_offset_obj).get_time_in_mills()
end_time = self.dt_event_timestamp + 3600 * 1000
self.output = {
"iceberg_table_name": iceberg_table_name,
"mode": "overwrite",
"dt_event_timestamp": self.dt_event_timestamp,
"time_range_list": [{"start_time": self.dt_event_timestamp, "end_time": end_time}],
"storage_conf": info["storage_conf"],
}
def build(self):
return ModelIcebergResultSetSinkNode(self)
|
py | b400fc15e814fef4f9d4834aad9791115f4b9e3d | ###############################################################################
# Copyright (c), Forschungszentrum Jülich GmbH, IAS-1/PGI-1, Germany. #
# All rights reserved. #
# This file is part of the AiiDA-FLEUR package. #
# #
# The code is hosted on GitHub at https://github.com/JuDFTteam/aiida-fleur #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.flapw.de or #
# http://aiida-fleur.readthedocs.io/en/develop/ #
###############################################################################
''' Contains tests for modifing FleurinpData with Fleurinpmodifier '''
import os
import copy
import pytest
from aiida_fleur.data.fleurinpmodifier import FleurinpModifier
# Collect the input files
file_path1 = '../files/inpxml/FePt/inp.xml'
inpxmlfilefolder = os.path.dirname(os.path.abspath(__file__))
inpxmlfilefolder = os.path.abspath(os.path.join(inpxmlfilefolder, file_path1))
def test_fleurinp_modifier1(create_fleurinp):
"""Tests if fleurinp_modifier with various modifations on species"""
from masci_tools.io.fleurxmlmodifier import ModifierTask
fleurinp_tmp = create_fleurinp(inpxmlfilefolder)
fm = FleurinpModifier(fleurinp_tmp)
fm.set_inpchanges({'dos': True, 'Kmax': 3.9})
fm.shift_value({'Kmax': 0.1}, 'rel')
fm.shift_value_species_label(' 222', 'radius', 3, mode='abs')
fm.set_species('all', {'mtSphere': {'radius': 3.333}})
fm.undo()
changes = fm.changes()
assert changes == [
ModifierTask(name='set_inpchanges', args=({
'dos': True,
'Kmax': 3.9
},), kwargs={}),
ModifierTask(name='shift_value', args=({
'Kmax': 0.1
}, 'rel'), kwargs={}),
ModifierTask(name='shift_value_species_label',
args=(' 222', 'radius', 3),
kwargs={'mode': 'abs'})
]
fm.show(validate=True)
fm.freeze()
fm = FleurinpModifier(fleurinp_tmp)
fm.set_inpchanges({'dos': True, 'Kmax': 3.9})
fm.undo(revert_all=True)
changes = fm.changes()
assert len(changes) == 0
def test_fleurinp_modifier2(create_fleurinp, inpxml_etree, eval_xpath):
"""Tests if fleurinp_modifier with various other modifations methods,
the detailed tests for method functionality is tested elsewhere."""
fleurinp_tmp = create_fleurinp(inpxmlfilefolder)
etree = inpxml_etree(inpxmlfilefolder)
fm = FleurinpModifier(fleurinp_tmp)
actions = fm.get_avail_actions()
assert isinstance(actions, dict)
new_tag = eval_xpath(etree, '/fleurInput/calculationSetup/scfLoop')
with pytest.deprecated_call():
fm.delete_tag('/fleurInput/calculationSetup/scfLoop')
with pytest.deprecated_call():
fm.replace_tag('/fleurInput/calculationSetup/cutoffs', new_tag)
with pytest.deprecated_call():
fm.delete_att('/fleurInput/calculationSetup/soc', 'theta')
with pytest.deprecated_call():
fm.create_tag('/fleurInput/calculationSetup/soc', 'theta')
with pytest.deprecated_call():
fm.xml_set_all_text('/fleurInput/cell/symmetryOperations/symOp/row-1', 'test text')
with pytest.deprecated_call():
fm.xml_set_text_occ('/fleurInput/cell/symmetryOperations/symOp/row-1', 'test text')
with pytest.deprecated_call():
fm.xml_set_text('/fleurInput/cell/symmetryOperations/symOp/row-1', 'test text')
with pytest.deprecated_call():
fm.xml_set_all_attribv('/fleurInput/calculationSetup/soc', 'theta', 12)
with pytest.deprecated_call():
fm.xml_set_first_attribv('/fleurInput/calculationSetup/soc', 'theta', 12)
with pytest.deprecated_call():
fm.xml_set_attribv_occ('/fleurInput/calculationSetup/soc', 'theta', 12)
fm.set_species_label(' 222', {'mtSphere': {'radius': 3.333}})
with pytest.deprecated_call():
fm.set_atomgr_att_label(attributedict={'force': {'relaxXYZ': 'FFF'}}, atom_label=' 222')
with pytest.deprecated_call():
fm.set_atomgr_att(attributedict={'force': {'relaxXYZ': 'TFF'}}, species='Fe-1')
#fm.set_nkpts(500, gamma='T')
#fm.set_kpath({'gamma': (0, 0, 0), 'L': (0.1, 0.1, 0.1)}, 300)
with pytest.deprecated_call():
fm.add_num_to_att('/fleurInput/calculationSetup/scfLoop', 'minDistance', 4)
#fm.set_species1
fm.show()
def test_fleurinp_modifier_regression(create_fleurinp, inpxml_etree, file_regression):
"""Tests if fleurinp_modifier with various other modifations methods,
the detailed tests for method functionality is tested elsewhere."""
fleurinp_tmp = create_fleurinp(inpxmlfilefolder)
fm = FleurinpModifier(fleurinp_tmp)
fm.set_inpchanges({'dos': True, 'Kmax': 3.9})
fm.shift_value({'Kmax': 0.1}, 'rel')
fm.shift_value_species_label(' 222', 'radius', 3, mode='abs')
fm.set_species('all', {'mtSphere': {'radius': 3.333}})
#fm.set_nkpts(500, gamma='T')
#fm.set_kpath({'gamma': (0, 0, 0), 'L': (0.1, 0.1, 0.1)}, 300)
with pytest.deprecated_call():
fm.add_num_to_att('/fleurInput/calculationSetup/scfLoop', 'minDistance', 4)
#fm.set_species1
fm.show()
new_fleurinp = fm.freeze()
file_regression.check(new_fleurinp.get_content('inp.xml'), extension='.xml')
def test_fleurinp_modifier_included_files(create_fleurinp, inpxml_etree, file_regression):
"""Tests if fleurinp_modifier with various other modifations methods,
the detailed tests for method functionality is tested elsewhere."""
TEST_FOLDER = os.path.dirname(os.path.abspath(__file__))
TEST_FOLDER = os.path.abspath(os.path.join(TEST_FOLDER, '../files/included_xml_files'))
INPXML_FILE = os.path.join(TEST_FOLDER, 'inp.xml')
KPTSXML_FILE = os.path.join(TEST_FOLDER, 'kpts.xml')
SYMXML_FILE = os.path.join(TEST_FOLDER, 'sym.xml')
fleurinp_tmp = create_fleurinp(INPXML_FILE, additional_files=[KPTSXML_FILE, SYMXML_FILE])
fm = FleurinpModifier(fleurinp_tmp)
#Modify main inp.xml file
fm.set_inpchanges({'dos': True, 'Kmax': 3.9})
fm.shift_value({'Kmax': 0.1}, 'rel')
#Modify included xml files
fm.delete_tag('symmetryOperations')
fm.create_tag('symmetryOperations')
fm.create_tag('kPointList')
fm.create_tag('kPoint', occurrences=0)
fm.set_attrib_value('name', 'TEST', contains='kPointList', occurrences=0)
fm.set_text('kPoint', [0.0, 0.0, 0.0],
complex_xpath="/fleurInput/cell/bzIntegration/kPointLists/kPointList[@name='TEST']/kPoint")
fm.show()
new_fleurinp = fm.freeze()
assert new_fleurinp.files == ['kpts.xml', 'sym.xml', 'inp.xml']
file_content = [
new_fleurinp.get_content('inp.xml'),
new_fleurinp.get_content('kpts.xml'),
new_fleurinp.get_content('sym.xml')
]
file_regression.check('\n'.join(file_content), extension='.xml')
#For this test we need a input file with defined LDA+U procedures
file_path2 = '../files/inpxml/GaAsMultiForceXML/inp.xml'
inpxmlfilefolder2 = os.path.dirname(os.path.abspath(__file__))
inpxmlfilefolder2 = os.path.abspath(os.path.join(inpxmlfilefolder2, file_path2))
def test_fleurinp_modifier_set_nmmpmat(create_fleurinp):
"""Tests if set_nmmpmat works on fleurinp modifier works, with right interface"""
fleurinp_tmp = create_fleurinp(inpxmlfilefolder2)
fm = FleurinpModifier(fleurinp_tmp)
fm.set_nmmpmat('Ga-1', orbital=2, spin=1, state_occupations=[1, 2, 3, 4, 5])
fm.set_nmmpmat('As-2', orbital=1, spin=1, denmat=[[1, -2, 3], [4, -5, 6], [7, -8, 9]])
# Does not validate
# Found invalid diagonal element for species Ga-1, spin 1 and l=2
with pytest.raises(ValueError):
fm.show(validate=True, display=False)
new_fleurinp = fm.freeze()
assert 'n_mmp_mat' in new_fleurinp.files
def test_fleurinp_modifier_instance_modifications(create_fleurinp):
"""Tests if set_nmmpmat works on fleurinp modifier works, with right interface"""
fleurinp_tmp = create_fleurinp(inpxmlfilefolder2)
n_mmp_mat_file = os.path.dirname(os.path.abspath(__file__))
n_mmp_mat_file = os.path.abspath(os.path.join(n_mmp_mat_file, '../files/n_mmp_mat/n_mmp_mat_GaAsMultiForceXML'))
fm = FleurinpModifier(fleurinp_tmp)
fm.set_file(n_mmp_mat_file, dst_filename='n_mmp_mat')
tasks_before = copy.deepcopy(fm._tasks)
fm.show()
assert fm._tasks == tasks_before
fm.validate()
assert fm._tasks == tasks_before
new_fleurinp = fm.freeze()
assert 'n_mmp_mat' in new_fleurinp.files
fm = FleurinpModifier(new_fleurinp)
fm.del_file('n_mmp_mat')
new_fleurinp = fm.freeze()
assert 'n_mmp_mat' not in new_fleurinp.files
def test_fleurinp_modifier_instance_modifications_node(create_fleurinp):
"""Tests if set_nmmpmat works on fleurinp modifier works, with right interface"""
from aiida.orm import FolderData
fleurinp_tmp = create_fleurinp(inpxmlfilefolder2)
n_mmp_mat_folder = os.path.dirname(os.path.abspath(__file__))
n_mmp_mat_folder = os.path.abspath(os.path.join(n_mmp_mat_folder, '../files/n_mmp_mat'))
n_mmp_mat_folder = FolderData(tree=n_mmp_mat_folder)
n_mmp_mat_folder.store()
fm = FleurinpModifier(fleurinp_tmp)
fm.set_file('n_mmp_mat_GaAsMultiForceXML', dst_filename='n_mmp_mat', node=n_mmp_mat_folder)
new_fleurinp = fm.freeze()
assert 'n_mmp_mat' in new_fleurinp.files
fm = FleurinpModifier(new_fleurinp)
fm.del_file('n_mmp_mat')
deleted_file = fm.freeze()
assert 'n_mmp_mat' not in deleted_file.files
fm = FleurinpModifier(new_fleurinp)
fm.del_file('n_mmp_mat')
fm.set_file('n_mmp_mat_GaAsMultiForceXML', dst_filename='n_mmp_mat', node=n_mmp_mat_folder.uuid)
new_fleurinp = fm.freeze()
assert 'n_mmp_mat' in new_fleurinp.files
def test_fleurinp_modifier_set_kpointsdata(create_fleurinp):
"""Test if setting a kpoints list to a fleurinp data node works"""
from aiida.orm import KpointsData
fleurinp_tmp = create_fleurinp(inpxmlfilefolder)
fleurinp_tmp.store() # needed?
struc = fleurinp_tmp.get_structuredata_ncf()
kps = KpointsData()
kps.set_cell(struc.cell)
kps.pbc = struc.pbc
kpoints_pos = [[0.0, 0.0, 0.0], [0.0, 0.5, 0.0], [0.5, 0.0, 0.0], [0.5, 0.0, 0.5], [0.5, 0.5, 0.5], [1.0, 1.0, 1.0]]
kpoints_weight = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
# Fleur renormalizes
kps.set_kpoints(kpoints_pos, cartesian=False, weights=kpoints_weight)
kps.store() # needed, because node has to be loaded...
#print(fleurinp_tmp)
fm = FleurinpModifier(fleurinp_tmp)
fm.set_kpointsdata(kps)
fm.show(validate=True, display=False)
fm.freeze()
# check if kpoint node is input into modification
# uuid of node show also work
fm = FleurinpModifier(fleurinp_tmp)
fm.set_kpointsdata(kps.uuid)
fm.freeze()
def test_fleurinpmodifier_error_messages(create_fleurinp):
"""Test error interface of fleurinpmodifier"""
fleurinp_tmp = create_fleurinp(inpxmlfilefolder)
fm = FleurinpModifier(fleurinp_tmp)
fm._tasks.append(('not_existent', [1, 2, 3], {'Random_arg': 'Does not make sense'})) # task does not exists.
with pytest.raises(ValueError):
fm.freeze()
fm = FleurinpModifier(fleurinp_tmp)
def test_fleurinpmodifier_element_serialization(create_fleurinp):
"""Tests of fleurinpmodifier registration methods accepting etree.Elements as arguments
If any of these don't serialize the elements correctly you will see an error that etree.fromstring
is passed an empty list (This is a weird side effect for etree._Element of the automatic serialization done in aiida-core)
"""
from lxml import etree
fleurinp_tmp = create_fleurinp(inpxmlfilefolder)
fm = FleurinpModifier(fleurinp_tmp)
fm.create_tag(etree.Element('expertModes'))
fm.delete_tag('expertModes')
fm.create_tag(tag=etree.Element('expertModes'))
fm.delete_tag('expertModes')
fm.create_tag(etree.Element('expertModes'), '/fleurInput/calculationSetup')
fm.delete_tag('expertModes')
fm.xml_create_tag('/fleurInput/calculationSetup', etree.Element('expertModes'))
fm.delete_tag('expertModes')
fm.xml_create_tag('/fleurInput/calculationSetup', element=etree.Element('expertModes'))
fm.delete_tag('expertModes')
fm.xml_create_tag('/fleurInput/calculationSetup', etree.Element('expertModes'), 0)
fm.replace_tag('expertModes', etree.Element('expertModes'))
fm.replace_tag('expertModes', element=etree.Element('expertModes'))
fm.replace_tag('expertModes', etree.Element('expertModes'), '/fleurInput/calculationSetup/expertModes')
fm.xml_replace_tag('/fleurInput/calculationSetup/expertModes', etree.Element('expertModes'))
fm.xml_replace_tag('/fleurInput/calculationSetup/expertModes', element=etree.Element('expertModes'))
fm.xml_replace_tag('/fleurInput/calculationSetup/expertModes', etree.Element('expertModes'), 0)
fm.show()
fm.freeze()
|
py | b400fc66367a574a219a26513a4d03815face3e9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines a class for aperture masks.
"""
import warnings
import astropy.units as u
import numpy as np
__all__ = ['ApertureMask']
class ApertureMask:
"""
Class for an aperture mask.
Parameters
----------
data : array_like
A 2D array representing the fractional overlap of an aperture
on the pixel grid. This should be the full-sized (i.e., not
truncated) array that is the direct output of one of the
low-level `photutils.geometry` functions.
bbox : `photutils.aperture.BoundingBox`
The bounding box object defining the aperture minimal bounding
box.
"""
def __init__(self, data, bbox):
self.data = np.asanyarray(data)
if self.data.shape != bbox.shape:
raise ValueError('mask data and bounding box must have the same '
'shape')
self.bbox = bbox
self._mask = (self.data == 0)
def __array__(self, dtype=None):
"""
Array representation of the mask data array (e.g., for
matplotlib).
"""
return np.asarray(self.data, dtype=dtype)
@property
def shape(self):
"""
The shape of the mask data array.
"""
return self.data.shape
def get_overlap_slices(self, shape):
"""
Get slices for the overlapping part of the aperture mask and a
2D array.
Parameters
----------
shape : 2-tuple of int
The shape of the 2D array.
Returns
-------
slices_large : tuple of slices or `None`
A tuple of slice objects for each axis of the large array,
such that ``large_array[slices_large]`` extracts the region
of the large array that overlaps with the small array.
`None` is returned if there is no overlap of the bounding
box with the given image shape.
slices_small : tuple of slices or `None`
A tuple of slice objects for each axis of the aperture mask
array such that ``small_array[slices_small]`` extracts the
region that is inside the large array. `None` is returned if
there is no overlap of the bounding box with the given image
shape.
"""
return self.bbox.get_overlap_slices(shape)
def to_image(self, shape, dtype=float):
"""
Return an image of the mask in a 2D array of the given shape,
taking any edge effects into account.
Parameters
----------
shape : tuple of int
The ``(ny, nx)`` shape of the output array.
dtype : data-type, optional
The desired data type for the array. This should be a
floating data type if the `ApertureMask` was created with
the "exact" or "subpixel" mode, otherwise the fractional
mask weights will be altered. An integer data type may be
used if the `ApertureMask` was created with the "center"
mode.
Returns
-------
result : `~numpy.ndarray`
A 2D array of the mask.
"""
if len(shape) != 2:
raise ValueError('input shape must have 2 elements.')
# find the overlap of the mask on the output image shape
slices_large, slices_small = self.get_overlap_slices(shape)
if slices_small is None:
return None # no overlap
# insert the mask into the output image
image = np.zeros(shape, dtype=dtype)
image[slices_large] = self.data[slices_small]
return image
def cutout(self, data, fill_value=0., copy=False):
"""
Create a cutout from the input data over the mask bounding box,
taking any edge effects into account.
Parameters
----------
data : array_like
A 2D array on which to apply the aperture mask.
fill_value : float, optional
The value used to fill pixels where the aperture mask does
not overlap with the input ``data``. The default is 0.
copy : bool, optional
If `True` then the returned cutout array will always hold
a copy of the input ``data``. If `False` and the mask is
fully within the input ``data``, then the returned cutout
array will be a view into the input ``data``. In cases
where the mask partially overlaps or has no overlap with the
input ``data``, the returned cutout array will always hold a
copy of the input ``data`` (i.e., this keyword has no
effect).
Returns
-------
result : `~numpy.ndarray` or `None`
A 2D array cut out from the input ``data`` representing the
same cutout region as the aperture mask. If there is a
partial overlap of the aperture mask with the input data,
pixels outside of the data will be assigned to
``fill_value``. `None` is returned if there is no overlap
of the aperture with the input ``data``.
"""
data = np.asanyarray(data)
if data.ndim != 2:
raise ValueError('data must be a 2D array.')
# find the overlap of the mask on the output image shape
slices_large, slices_small = self.get_overlap_slices(data.shape)
if slices_small is None:
return None # no overlap
cutout_shape = (slices_small[0].stop - slices_small[0].start,
slices_small[1].stop - slices_small[1].start)
if cutout_shape == self.shape:
cutout = data[slices_large]
if copy:
cutout = np.copy(cutout)
return cutout
# cutout is always a copy for partial overlap
if ~np.isfinite(fill_value):
dtype = float
else:
dtype = data.dtype
cutout = np.zeros(self.shape, dtype=dtype)
cutout[:] = fill_value
cutout[slices_small] = data[slices_large]
if isinstance(data, u.Quantity):
cutout <<= data.unit
return cutout
def multiply(self, data, fill_value=0.):
"""
Multiply the aperture mask with the input data, taking any edge
effects into account.
The result is a mask-weighted cutout from the data.
Parameters
----------
data : array_like or `~astropy.units.Quantity`
The 2D array to multiply with the aperture mask.
fill_value : float, optional
The value is used to fill pixels where the aperture mask
does not overlap with the input ``data``. The default is 0.
Returns
-------
result : `~numpy.ndarray` or `None`
A 2D mask-weighted cutout from the input ``data``. If
there is a partial overlap of the aperture mask with the
input data, pixels outside of the data will be assigned to
``fill_value`` before being multiplied with the mask. `None`
is returned if there is no overlap of the aperture with the
input ``data``.
"""
cutout = self.cutout(data, fill_value=fill_value)
if cutout is None:
return None
else:
# ignore multiplication with non-finite data values
with warnings.catch_warnings():
warnings.simplefilter('ignore', RuntimeWarning)
weighted_cutout = cutout * self.data
# fill values outside of the mask but within the bounding box
weighted_cutout[self._mask] = fill_value
return weighted_cutout
def get_values(self, data, mask=None):
"""
Get the mask-weighted pixel values from the data as a 1D array.
If the ``ApertureMask`` was created with ``method='center'``,
(where the mask weights are only 1 or 0), then the returned
values will simply be pixel values extracted from the data.
Parameters
----------
data : array_like or `~astropy.units.Quantity`
The 2D array from which to get mask-weighted values.
mask : array_like (bool), optional
A boolean mask with the same shape as ``data`` where a
`True` value indicates the corresponding element of ``data``
is not returned in the result.
Returns
-------
result : `~numpy.ndarray`
A 1D array of mask-weighted pixel values from the input
``data``. If there is no overlap of the aperture with the
input ``data``, the result will be an empty array with shape
(0,).
"""
slc_large, slc_small = self.get_overlap_slices(data.shape)
if slc_large is None:
return np.array([])
cutout = data[slc_large]
apermask = self.data[slc_small]
pixel_mask = (apermask > 0) # good pixels
if mask is not None:
if mask.shape != data.shape:
raise ValueError('mask and data must have the same shape')
pixel_mask &= ~mask[slc_large]
# ignore multiplication with non-finite data values
with warnings.catch_warnings():
warnings.simplefilter('ignore', RuntimeWarning)
return (cutout * apermask)[pixel_mask]
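# --- Hedged usage sketch (added for illustration; not part of photutils itself) ---
# Builds an ApertureMask by hand from a small weight array and a BoundingBox (assumed
# importable from photutils.aperture) and exercises the methods above. In the real
# package this demo would live in a separate script to avoid a circular import.
from photutils.aperture import BoundingBox
demo_bbox = BoundingBox(ixmin=3, ixmax=8, iymin=4, iymax=9)   # a 5x5 pixel region
demo_mask = ApertureMask(np.full((5, 5), 0.5), demo_bbox)
demo_image = np.arange(400.0).reshape(20, 20)
print(demo_mask.shape)                              # (5, 5)
print(demo_mask.to_image(demo_image.shape).sum())   # 12.5 -> sum of the 0.5 weights
print(demo_mask.cutout(demo_image).shape)           # (5, 5)
print(demo_mask.multiply(demo_image).sum())         # mask-weighted pixel sum
print(demo_mask.get_values(demo_image).size)        # 25 values inside the mask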
|
py | b400fd372ef4a504ce034bf9a1333d949f9e9301 | import tensorflow as tf
# Define a 2D array (list of lists)
list_of_list = [[10, 20], [30, 40]]
# Convert to a tensor - pass the 2D array to tf.constant
mat1 = tf.constant(list_of_list)
# Check the rank
print("rank:", tf.rank(mat1))
# Print the tensor
print("mat1:", mat1)
# Define 1D vectors
vec1 = tf.constant([1, 0])
vec2 = tf.constant([-1, 2])
# Convert to a tensor - stack the 1D vectors on top of each other with tf.stack
mat2 = tf.stack([vec1, vec2])
# Check the rank
print("rank:", tf.rank(mat2))
# Print the tensor
print("mat2:", mat2)
# Element-by-element (element-wise) operation
element_mul = tf.math.multiply(mat1, mat2)
print("result:", element_mul)
print("rank:", tf.rank(element_mul))
# Broadcasting operation
element_bc = tf.math.multiply(mat1, 3)
print("result:", element_bc)
print("rank:", tf.rank(element_bc))
# Matrix multiplication
mat_mul = tf.matmul(mat1, mat2)
print("result:", mat_mul)
print("rank:", tf.rank(mat_mul))
# Addition
add1 = tf.math.add(mat1, mat2)
print("result:", add1)
print("rank:", tf.rank(add1))
# Addition (plain Python operator)
add2 = mat1 + mat2
print("result:", add2)
print("rank:", tf.rank(add2))
# Convert the tensor to a NumPy array
np_arr = mat_mul.numpy()
print(type(np_arr))
print(np_arr)
|
py | b400fd50edeeef02d6935f56d7b80c4f71e37fc6 | # coding: utf-8
"""
some additional template filters
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyleft: 2007-2009 by the django-tools team, see AUTHORS for more details.
:license: GNU GPL v3 or above, see LICENSE for more details.
"""
import datetime
from django_tools.utils.time_utils import datetime2float
if __name__ == "__main__":
# For doctest only
import os
os.environ["DJANGO_SETTINGS_MODULE"] = "django.conf.global_settings"
from django.template.defaultfilters import stringfilter
from django.utils.translation import ugettext as _
from django.utils.encoding import force_unicode
CHMOD_TRANS_DATA = (
u"---", u"--x", u"-w-", u"-wx", u"r--", u"r-x", u"rw-", u"rwx"
)
def chmod_symbol(octal_value):
"""
Transform an os.stat().st_mode value (given as an octal number) to a symbolic string.
Ignores meta information like SUID, SGID or the sticky bit.
e.g. 40755 -> rwxr-xr-x
>>> chmod_symbol(644)
u'rw-r--r--'
>>> chmod_symbol(40755)
u'rwxr-xr-x'
>>> chmod_symbol("777")
u'rwxrwxrwx'
"""
octal_value_string = str(octal_value)[-3:] # strip "meta info"
return u''.join(CHMOD_TRANS_DATA[int(num)] for num in octal_value_string)
chmod_symbol.is_safe = True
chmod_symbol = stringfilter(chmod_symbol)
def get_oct(value):
"""
Convert an integer number to an octal string.
"""
try:
return oct(value)
except:
return value
get_oct.is_safe = False
def human_duration(t):
"""
Converts a time duration into a friendly text representation.
>>> human_duration("type error")
Traceback (most recent call last):
...
TypeError: human_duration() argument must be timedelta, integer or float)
>>> human_duration(datetime.timedelta(microseconds=1000))
u'1.0 ms'
>>> human_duration(0.01)
u'10.0 ms'
>>> human_duration(0.9)
u'900.0 ms'
>>> human_duration(datetime.timedelta(seconds=1))
u'1.0 sec'
>>> human_duration(65.5)
u'1.1 min'
>>> human_duration((60 * 60)-1)
u'59.0 min'
>>> human_duration(60*60)
u'1.0 hours'
>>> human_duration(1.05*60*60)
u'1.1 hours'
>>> human_duration(datetime.timedelta(hours=24))
u'1.0 days'
>>> human_duration(2.54 * 60 * 60 * 24 * 365)
u'2.5 years'
"""
if isinstance(t, datetime.timedelta):
# timedelta.total_seconds() is new in Python 2.7
t = datetime2float(t)
elif not isinstance(t, (int, float)):
raise TypeError("human_duration() argument must be timedelta, integer or float)")
chunks = (
(60 * 60 * 24 * 365, _('years')),
(60 * 60 * 24 * 30, _('months')),
(60 * 60 * 24 * 7, _('weeks')),
(60 * 60 * 24, _('days')),
(60 * 60, _('hours')),
)
if t < 1:
return _("%.1f ms") % round(t * 1000, 1)
if t < 60:
return _("%.1f sec") % round(t, 1)
if t < 60 * 60:
return _("%.1f min") % round(t / 60, 1)
for seconds, name in chunks:
count = t / seconds
if count >= 1:
count = round(count, 1)
break
return "%(number).1f %(type)s" % {'number': count, 'type': name}
human_duration.is_safe = True
if __name__ == "__main__":
import doctest
print doctest.testmod(verbose=False)
|
py | b400feaef2e3c61c63fe3bf6a0ff6b8179dbd4a6 | from setuptools import setup
def readme():
with open('README.md') as f:
README = f.read()
return README
setup(
name = 'bingsearchpy',
packages = ['bingsearchpy'],
version = '0.7',
description=('bing search engine for python'),
long_description=readme(),
long_description_content_type="text/markdown",
license='MIT',
author = 'mrxxx04',
author_email = '[email protected]',
url = 'https://github.com/rizki4106/bingsearchpy',
download_url = 'https://github.com/rizki4106/bingsearchpy/archive/v.0.1.zip',
keywords = ['google', 'msn', 'searchengine', 'microsoft'],
install_requires=[
'requests',
'beautifulsoup4',
],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
) |
py | b400feda3e5b20db9ab4d1fb3b2f0ac0ce435c7b | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import time
from oslo_service import service as oslo_service
from gbpservice.nfp.core import log as nfp_logging
LOG = nfp_logging.getLogger(__name__)
ProcessLauncher = oslo_service.ProcessLauncher
"""Worker process launcher.
Derives the oslo process launcher to
launch childrens with python multiprocessing
as oppose to os.fork(), coz, communication
is needed from parent->child not just the
parallel execution.
"""
class NfpLauncher(ProcessLauncher):
def __init__(self, conf):
super(NfpLauncher, self).__init__(conf)
def child(self, service, ppipe, cpipe, controller):
service.parent_pipe = ppipe
service.pipe = cpipe
service.controller = controller
self.launcher = self._child_process(service)
while True:
self._child_process_handle_signal()
status, signo = self._child_wait_for_exit_or_signal(
self.launcher)
if not oslo_service._is_sighup_and_daemon(signo):
self.launcher.wait()
break
self.launcher.restart()
os._exit(status)
def _start_child(self, wrap):
if len(wrap.forktimes) > wrap.workers:
# Limit ourselves to one process a second (over the period of
# number of workers * 1 second). This will allow workers to
# start up quickly but ensure we don't fork off children that
# die instantly too quickly.
if time.time() - wrap.forktimes[0] < wrap.workers:
time.sleep(1)
wrap.forktimes.pop(0)
wrap.forktimes.append(time.time())
pid = self.fork_child(wrap)
message = "Started Child Process %d" % (pid)
LOG.debug(message)
wrap.children.add(pid)
self.children[pid] = wrap
return pid
def fork_child(self, wrap):
# Default use os.fork to create a child
pid = os.fork()
if pid == 0:
self.launcher = self._child_process(wrap.service)
while True:
self._child_process_handle_signal()
status, signo = self._child_wait_for_exit_or_signal(
self.launcher)
if not oslo_service._is_sighup_and_daemon(signo):
self.launcher.wait()
break
self.launcher.restart()
os._exit(status)
return pid
|
py | b400ff23aabe6769f71ab877e1463eab4a4e47e3 | import re
from tldextract import extract
from utils.logger import Logger
def port_parse(port: str) -> str:
"""
Parse port, checks for validity.
:param port: The port number.
:type port: str
:return: stripped port.
:rtype: str
:raise AssertionError: If invalid number of port.
"""
assert 1 <= int(port) <= 65535, "The port number is invalid!"
return str(int(port)) # truncate floating point if any
def link_sep(input_url: str) -> [str, str]:
"""
Strip URL with and obtain url and port.
:param input_url: The url to strip
:type input_url: str
:return: stripped url and the port.
:rtype: list of str
"""
splitted = url_strip(input_url).rsplit(":", 1)
if len(splitted) != 2:
splitted.append("443")
splitted[1] = splitted[1].split("/", 1)[0].split("?", 1)[0]
splitted[1] = port_parse(splitted[1])
return splitted
def url_strip(input_url, strip_www=False) -> str:
"""
Strip URL with regex and obtain domain (DEPRECATED, USE url_domain).
deprecated:: 2.0.alpha
Use :func:`url_domain` instead.
:param input_url: The url to strip
:type input_url: str
:param strip_www: Strip also the www
:type strip_www: bool
:return: stripped url.
:rtype: str
"""
url = (
re.compile(r"https?://") if not strip_www else re.compile(r"https?://(www\.)?")
)
return url.sub("", input_url).strip().strip("/")
def url_domain(url, keep_subdomain=True) -> str:
"""
Strip URL and obtain domain.
:param url: The url to strip
:type url: str
:param keep_subdomain: keep the subdomain, default True
:type keep_subdomain: bool
:return: stripped url.
:rtype: str
"""
results = extract(url)
output = (
f"{results.subdomain + '.' if keep_subdomain and results.subdomain != '' else ''}{results.domain}"
f"{'.' + results.suffix if results.suffix != '' else ''}"
)
Logger("URL_Domain").debug(f"parsed {url} into {output}")
return output
def has_wildcard(url) -> bool:
"""
Check if the url contains a wildcard in last subdomain.
:param url: The url to check
:type url: str
:return: True if the url contains a wildcard in the last subdomain, False otherwise
:rtype: bool
"""
subdomain = extract(url).subdomain
return subdomain.split(".")[0] == "*" # check if last subdomain is a wildcard
def remove_wildcard(url) -> str:
"""
Remove the wildcard from the last subdomain.
:param url: The url to remove the wildcard from
:type url: str
:return: The url without the wildcard
:rtype: str
"""
return url_domain(url)[2:]
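# --- Hedged usage sketch (added for illustration; hostnames are arbitrary examples) ---
print(link_sep("https://example.com:8443/path?q=1"))                             # ['example.com', '8443']
print(url_domain("https://www.docs.example.co.uk/page"))                         # 'www.docs.example.co.uk'
print(url_domain("https://www.docs.example.co.uk/page", keep_subdomain=False))   # 'example.co.uk'
print(has_wildcard("*.example.com"), remove_wildcard("*.example.com"))           # True example.com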
|
py | b400ff3d5687f20abcaa5cc4bc3368c6e96c1d39 | #!/usr/bin/env python3
'''
kicad-footprint-generator is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
kicad-footprint-generator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with kicad-footprint-generator. If not, see < http://www.gnu.org/licenses/ >.
'''
import sys
import os
#sys.path.append(os.path.join(sys.path[0],"..","..","kicad_mod")) # load kicad_mod path
# export PYTHONPATH="${PYTHONPATH}<path to kicad-footprint-generator directory>"
sys.path.append(os.path.join(sys.path[0], "..", "..", "..")) # load parent path of KicadModTree
from math import sqrt
import argparse
import yaml
from helpers import *
from KicadModTree import *
sys.path.append(os.path.join(sys.path[0], "..", "..", "tools")) # load parent path of tools
from footprint_text_fields import addTextFields
series = "PicoBlade"
series_long = 'PicoBlade Connector System'
manufacturer = 'Molex'
orientation = 'V'
number_of_rows = 1
datasheet = 'http://www.molex.com/pdm_docs/sd/530470610_sd.pdf'
#pins_per_row per row
pins_per_row_range = range(2,16)
#Molex part number
#n = number of circuits per row
part_code = "53047-{n:02}10"
pitch = 1.25
drill = 0.5
pad_to_pad_clearance = 0.8
max_annular_ring = 0.4
min_annular_ring = 0.15
pad_size = [pitch - pad_to_pad_clearance, drill + 2*max_annular_ring]
if pad_size[0] - drill < 2*min_annular_ring:
pad_size[0] = drill + 2*min_annular_ring
if pad_size[0] - drill > 2*max_annular_ring:
pad_size[0] = drill + 2*max_annular_ring
pad_shape=Pad.SHAPE_OVAL
if pad_size[1] == pad_size[0]:
pad_shape=Pad.SHAPE_CIRCLE
def generate_one_footprint(pins, configuration):
mpn = part_code.format(n=pins)
# handle arguments
orientation_str = configuration['orientation_options'][orientation]
footprint_name = configuration['fp_name_format_string'].format(man=manufacturer,
series=series,
mpn=mpn, num_rows=number_of_rows, pins_per_row=pins, mounting_pad = "",
pitch=pitch, orientation=orientation_str)
kicad_mod = Footprint(footprint_name)
kicad_mod.setDescription("Molex {:s}, {:s}, {:d} Pins per row ({:s}), generated with kicad-footprint-generator".format(series_long, mpn, pins_per_row, datasheet))
kicad_mod.setTags(configuration['keyword_fp_string'].format(series=series,
orientation=orientation_str, man=manufacturer,
entry=configuration['entry_direction'][orientation]))
A = (pins - 1) * pitch
B = A + 1.8
C = A + 3
#connector width
W = 3.2
#side thickness
T = 0.4
#corner positions
x1 = (A - C) / 2
x2 = x1 + C
y2 = 1.15
y1 = y2 - W
off = configuration['silk_fab_offset']
pad_silk_off = configuration['silk_pad_clearance'] + configuration['silk_line_width']/2
body_edge={
'left':x1,
'right':x2,
'bottom':y2,
'top': y1
}
bounding_box = body_edge.copy()
# generate the pads
optional_pad_params = {}
if configuration['kicad4_compatible']:
optional_pad_params['tht_pad1_shape'] = Pad.SHAPE_RECT
else:
optional_pad_params['tht_pad1_shape'] = Pad.SHAPE_ROUNDRECT
kicad_mod.append(PadArray(start=[0,0], pincount=pins, x_spacing=pitch,
type=Pad.TYPE_THT, shape=pad_shape, size=pad_size, drill=drill,
layers=Pad.LAYERS_THT,
**optional_pad_params))
# outline on Fab
kicad_mod.append(RectLine(start=[x1,y1],end=[x2,y2],
layer='F.Fab', width=configuration['fab_line_width']))
# outline on SilkScreen
kicad_mod.append(RectLine(start=[x1,y1],end=[x2,y2],offset=off,
layer='F.SilkS', width=configuration['silk_line_width']))
inline = [
{'x': A/2,'y': y2 - T},
{'x': x1 + T,'y': y2 - T},
{'x': x1 + T,'y': 0},
{'x': x1 + T/2,'y': 0},
{'x': x1 + T/2,'y': -2*T},
{'x': x1 + T,'y': -2*T},
{'x': x1 + T,'y': y1 + T},
{'x': A/2,'y': y1 + T},
]
kicad_mod.append(PolygoneLine(polygone=inline,
layer='F.SilkS', width=configuration['silk_line_width']))
kicad_mod.append(PolygoneLine(polygone=inline, x_mirror=A/2,
layer='F.SilkS', width=configuration['silk_line_width']))
#pin-1 mark
L = 1
kicad_mod.append(Line(start=[x1-0.4,y2+0.4], end=[x1-0.4,y2+0.4-L],
layer='F.SilkS', width=configuration['silk_line_width']))
kicad_mod.append(Line(start=[x1-0.4,y2+0.4], end=[x1-0.4+L,y2+0.4],
layer='F.SilkS', width=configuration['silk_line_width']))
sl=1
pin = [
{'y': body_edge['bottom'], 'x': -sl/2},
{'y': body_edge['bottom'] - sl/sqrt(2), 'x': 0},
{'y': body_edge['bottom'], 'x': sl/2}
]
kicad_mod.append(PolygoneLine(polygone=pin,
width=configuration['fab_line_width'], layer='F.Fab'))
########################### CrtYd #################################
cx1 = roundToBase(bounding_box['left']-configuration['courtyard_offset']['connector'], configuration['courtyard_grid'])
cy1 = roundToBase(bounding_box['top']-configuration['courtyard_offset']['connector'], configuration['courtyard_grid'])
cx2 = roundToBase(bounding_box['right']+configuration['courtyard_offset']['connector'], configuration['courtyard_grid'])
cy2 = roundToBase(bounding_box['bottom'] + configuration['courtyard_offset']['connector'], configuration['courtyard_grid'])
kicad_mod.append(RectLine(
start=[cx1, cy1], end=[cx2, cy2],
layer='F.CrtYd', width=configuration['courtyard_line_width']))
######################### Text Fields ###############################
addTextFields(kicad_mod=kicad_mod, configuration=configuration, body_edges=body_edge,
courtyard={'top':cy1, 'bottom':cy2},
fp_name=footprint_name, text_y_inside_position='top')
##################### Output and 3d model ############################
model3d_path_prefix = configuration.get('3d_model_prefix','${KICAD6_3DMODEL_DIR}/')
lib_name = configuration['lib_name_format_string'].format(series=series, man=manufacturer)
model_name = '{model3d_path_prefix:s}{lib_name:s}.3dshapes/{fp_name:s}.wrl'.format(
model3d_path_prefix=model3d_path_prefix, lib_name=lib_name, fp_name=footprint_name)
kicad_mod.append(Model(filename=model_name))
output_dir = '{lib_name:s}.pretty/'.format(lib_name=lib_name)
if not os.path.isdir(output_dir): #returns false if path does not yet exist!! (Does not check path validity)
os.makedirs(output_dir)
filename = '{outdir:s}{fp_name:s}.kicad_mod'.format(outdir=output_dir, fp_name=footprint_name)
file_handler = KicadFileHandler(kicad_mod)
file_handler.writeFile(filename)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='use confing .yaml files to create footprints.')
parser.add_argument('--global_config', type=str, nargs='?', help='the config file defining how the footprint will look like. (KLC)', default='../../tools/global_config_files/config_KLCv3.0.yaml')
parser.add_argument('--series_config', type=str, nargs='?', help='the config file defining series parameters.', default='../conn_config_KLCv3.yaml')
parser.add_argument('--kicad4_compatible', action='store_true', help='Create footprints kicad 4 compatible')
args = parser.parse_args()
with open(args.global_config, 'r') as config_stream:
try:
configuration = yaml.safe_load(config_stream)
except yaml.YAMLError as exc:
print(exc)
with open(args.series_config, 'r') as config_stream:
try:
configuration.update(yaml.safe_load(config_stream))
except yaml.YAMLError as exc:
print(exc)
configuration['kicad4_compatible'] = args.kicad4_compatible
for pins_per_row in pins_per_row_range:
generate_one_footprint(pins_per_row, configuration)
|
py | b400ff4f44b03ce9c780ef33732c6481154406fa | from __future__ import (absolute_import, division,
print_function, unicode_literals)
from pyotp.hotp import HOTP # noqa
from pyotp.otp import OTP # noqa
from pyotp.totp import TOTP # noqa
from . import utils # noqa
def random_base32(length=16, random=None,
chars=list('ABCDEFGHIJKLMNOPQRSTUVWXYZ234567')):
# Use secrets module if available (Python version >= 3.6) per PEP 506
try:
import secrets
random = secrets.SystemRandom()
except ImportError:
import random as _random
random = _random.SystemRandom()
return ''.join(
random.choice(chars)
for _ in range(length)
)
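# --- Hedged usage sketch (added for illustration; drop these lines when using this
# file as the real package __init__, since they run at import time) ---
demo_secret = random_base32()
print(demo_secret, TOTP(demo_secret).now())   # base32 secret and its current TOTP code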
|
py | b4010069f8957861a0e0c73e3931d374d8ecb296 | #!/usr/bin/env python3
import argparse
import json
import shlex
from scapy.all import *
from scapy.layers.tls.record import TLS
from scapy.layers.tls.handshake import TLSClientHello, TLSServerHello
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-i', '--interface', type=str, dest='interface', default='eth0',
help='interface to use')
parser.add_argument('-c', '--count', type=int, dest='count', default=1000,
help='number of packets to send, 0 means send forever')
parser.add_argument('-f', '--filename', type=str, dest='filename', default=f'{sys.argv[0]}.pcap',
help='filename to save pcap')
parser.add_argument('-d', '--dir', type=str, dest='dir', default='.',
help='dir to save ja3 json files')
parser.description = """\
This is a Python program to sniff TLS traffic and parse ja3 signatures from that traffic. Writes ja3 as json files,
can easily be picked up from a queue dir or shipped to an api endpoint instead.
"""
args = parser.parse_args()
def write_pcap():
try:
print(f'Starting sniffer for {args.count} packets')
packets = sniff(iface=args.interface,
count=args.count,
prn=lambda x: x.summary(),
# just 'client hello'
# lfilter=lambda x: scapy.layers.tls.handshake.TLSClientHello in x)
# just 'server hello'
# lfilter=lambda x: scapy.layers.tls.handshake.TLSServerHello in x)
# both hellos
# lfilter=lambda x: (TLSClientHello or TLSServerHello) in x)
# TLS only, but _all_ tls
lfilter=lambda x: TLS in x)
# write out a pcap
wrpcap(args.filename, packets)
except KeyboardInterrupt:
sys.exit(0)
def get_ja3(ja3s=False):
'''
Run the bundled ja3/ja3s parser over the saved pcap and return its JSON output.
:param ja3s: if True, parse server-hello (ja3s) signatures instead of client-hello ja3.
:return: JSON string produced by the parser.
'''
cmd = ''
res = []
if ja3s:
cmd = f'python3 ./ja3/python/ja3s.py --json {args.filename}'
else:
cmd = f'python3 ./ja3/python/ja3.py --json {args.filename}'
res = subprocess.run(shlex.split(cmd), check=True, stdout=subprocess.PIPE)
if res.returncode == 0:
return(res.stdout)
else:
print(f'could not dump ja3 json!')
sys.exit(res.returncode)
def main():
write_pcap()
# get client hello ja3
ja3_list = json.loads(get_ja3())
# get server hello ja3s
ja3_list.extend(json.loads(get_ja3(ja3s=True)))
for ja3 in ja3_list:
# print(f'{type(ja3)} - {ja3}')
# print(f'{ja3["timestamp"]} - {ja3["source_ip"]}')
print(ja3)
# # write to queue or dir or endpoint for more processing
# build a unique filename
jsonfname = f'{ja3["source_ip"]}:{ja3["source_port"]}'
jsonfname += f'-{ja3["destination_ip"]}:{ja3["destination_port"]}'
jsonfname += f'-{ja3["timestamp"]}-{ja3["ja3_digest"]}.json'
with open(f'{args.dir}/{jsonfname}', 'w') as outfile:
outfile.write(json.dumps(ja3, indent=4))
if __name__ == '__main__':
main()
|
py | b401011e7e3cc04a5fe106a331ee9c9162214681 | import json
from dvc.repo.plots.data import PlotData
from tests.func.metrics.utils import _write_json
def test_diff_dirty(tmp_dir, scm, dvc, run_copy_metrics):
metric_1 = [{"y": 2}, {"y": 3}]
_write_json(tmp_dir, metric_1, "metric_t.json")
run_copy_metrics(
"metric_t.json",
"metric.json",
plots_no_cache=["metric.json"],
commit="init",
)
metric_2 = [{"y": 3}, {"y": 5}]
_write_json(tmp_dir, metric_2, "metric_t.json")
run_copy_metrics(
"metric_t.json",
"metric.json",
plots_no_cache=["metric.json"],
commit="second",
)
metric_3 = [{"y": 5}, {"y": 6}]
_write_json(tmp_dir, metric_3, "metric_t.json")
run_copy_metrics(
"metric_t.json", "metric.json", plots_no_cache=["metric.json"]
)
plot_string = dvc.plots.diff(props={"fields": {"y"}})["metric.json"]
plot_content = json.loads(plot_string)
assert plot_content["data"]["values"] == [
{"y": 3, PlotData.INDEX_FIELD: 0, "rev": "HEAD"},
{"y": 5, PlotData.INDEX_FIELD: 1, "rev": "HEAD"},
{"y": 5, PlotData.INDEX_FIELD: 0, "rev": "workspace"},
{"y": 6, PlotData.INDEX_FIELD: 1, "rev": "workspace"},
]
assert plot_content["encoding"]["x"]["field"] == PlotData.INDEX_FIELD
assert plot_content["encoding"]["y"]["field"] == "y"
_write_json(tmp_dir, [{"y": 7}, {"y": 8}], "metric.json")
plot_string = dvc.plots.diff(props={"fields": {"y"}})["metric.json"]
plot_content = json.loads(plot_string)
assert plot_content["data"]["values"] == [
{"y": 3, PlotData.INDEX_FIELD: 0, "rev": "HEAD"},
{"y": 5, PlotData.INDEX_FIELD: 1, "rev": "HEAD"},
{"y": 7, PlotData.INDEX_FIELD: 0, "rev": "workspace"},
{"y": 8, PlotData.INDEX_FIELD: 1, "rev": "workspace"},
]
assert plot_content["encoding"]["x"]["field"] == PlotData.INDEX_FIELD
assert plot_content["encoding"]["y"]["field"] == "y"
|
py | b40101d7de6f7ea98d0e27e9272b03b8455acde3 | from textwrap import dedent
idx_doc = (
"If true, save an methylpy chromosome index for back compatibility. "
"If you only use methylpy to call DMR, this don't need to be True."
)
allc_path_doc = "Path to 1 ALLC file"
allc_paths_doc = (
"Single ALLC path contain wildcard OR multiple space separated ALLC paths "
"OR a file contains 1 ALLC path in each row."
)
allc_table_doc = (
"Contain all the ALLC file information in two tab-separated columns: "
"1. file_uid, 2. file_path. No header"
)
binarize_doc = (
"If set, binarize each single site in each individual ALLC file. "
"This means each cytosine will only contribute at most 1 cov and 0/1 mc, "
"this is suitable to account for single cell ALLC R1 R2 overlap issue, "
"Only use this on single cell ALLC, not bulk ALLC."
)
bin_sizes_doc = (
"Fix-size genomic bins can be defined by bin_sizes and chrom_size_path. "
"Space separated sizes of genome bins, each size will be count separately."
)
bw_bin_sizes_doc = "Bin size of the BigWig files."
chrom_size_path_doc = (
"Path to UCSC chrom size file. "
"This can be generated from the genome fasta or downloaded via UCSC fetchChromSizes tools. "
"All ALLCools functions will refer to this file whenever possible to check for "
"chromosome names and lengths, so it is crucial to use a chrom size file consistent "
"to the reference fasta file ever since mapping. "
"ALLCools functions will not change or infer chromosome names."
)
compress_level_doc = "Compression level for the output file"
cov_cutoff_doc = "Max cov filter for a single site in ALLC. Sites with cov > cov_cutoff will be skipped."
cpu_basic_doc = "Number of processes to use in parallel."
mc_contexts_doc = (
"Space separated mC context patterns to extract from ALLC. "
"The context length should be the same as ALLC file context. "
"Context pattern follows IUPAC nucleotide code, e.g. N for ATCG, H for ATC, Y for CT."
)
mc_context_mcad_doc = (
"mC context pattern to extract from ALLC. "
"Context pattern follows IUPAC nucleotide code, e.g. N for ATCG, H for ATC, Y for CT."
"Note that generate_mcad only take one mC context"
)
reference_fasta_doc = (
"Path to 1 genome reference FASTA file (the one used for mapping), "
"use samtools fadix to build .fai index first. Do not compress that file."
)
region_bed_names_doc = (
"Space separated names for each BED file provided in region_bed_paths."
)
region_bed_paths_doc = (
"Arbitrary genomic regions can be defined in several BED files to count on. "
"Space separated paths to each BED files, "
"The fourth column of the BED file should be unique id of the regions."
)
region_bed_path_mcad_doc = (
"Arbitrary genomic regions can be defined in one BED file to count on. "
"The fourth column of the BED file should be unique id of the regions."
)
region_doc = (
"Only extract records from certain genome region(s) via tabix, "
"multiple region can be provided in tabix form. If region is not None, will not run in parallel"
)
remove_additional_chrom_doc = (
"Whether to remove rows with unknown chromosome instead of raising KeyError"
)
rna_table_doc = (
"This is only for mCT data when we have RNA BAM file for each single cell. "
"Contain all the RNA BAM file information in 2 columns: 1. file_uid, 2. file_path. No header."
)
snp_doc = "If true, means the input allc contain snp information, and the allc processing will take care that."
split_strand_doc = "If true, Watson (+) and Crick (-) strands will be count separately"
strandness_doc = (
"What to do with strand information, possible values are: "
"1. both: save +/- strand together in one file; "
"2. split: save +/- strand into two separate files, with suffix contain Watson (+) and Crick (-); "
"3. merge: This will only merge the count on adjacent CpG in +/- strands, "
"only work for CpG like context. For non-CG context, its the same as both."
)
def doc_params(**kwds):
"""\
Docstrings should start with "\" in the first line for proper formatting.
"""
def dec(obj):
obj.__doc__ = dedent(obj.__doc__).format(**kwds)
return obj
return dec
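# --- Hedged usage sketch (added for illustration; 'demo_tool' is a hypothetical consumer) ---
# doc_params fills the {placeholder} fields of a docstring with the shared parameter
# docs defined above, so every tool documents e.g. allc_path identically.
@doc_params(allc_path_doc=allc_path_doc, cpu_doc=cpu_basic_doc)
def demo_tool(allc_path, cpu=1):
    """\
    Hypothetical tool that reads one ALLC file.
    Parameters
    ----------
    allc_path
        {allc_path_doc}
    cpu
        {cpu_doc}
    """
    return allc_path, cpu
print(demo_tool.__doc__)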
|
py | b40102806e2a835be8eae58af9b28b0db41040a8 | def solution(A):
# A is a permutation of 1..N exactly when its value set equals {1, ..., len(A)}
return 1 if set(A) == set(range(1, len(A) + 1)) else 0
if __name__ == "__main__":
A = [1,2,3,4,2]
X=solution(A)
print (X) |
py | b40102a7720769667e28c4e8c770843a0d317734 | """
CAR CONFIG
This file is read by your car application's manage.py script to change the car
performance.
EXAMPLE
-----------
import dk
cfg = dk.load_config(config_path='~/mycar/config.py')
print(cfg.CAMERA_RESOLUTION)
"""
import os
#PATHS
CAR_PATH = PACKAGE_PATH = os.path.dirname(os.path.realpath(__file__))
DATA_PATH = os.path.join(CAR_PATH, 'data')
#VEHICLE
DRIVE_LOOP_HZ = 20 # the vehicle loop will pause if faster than this speed.
MAX_LOOPS = None # the vehicle loop can abort after this many iterations, when given a positive integer.
#9865, over rides only if needed, ie. TX2..
PCA9685_I2C_ADDR = 0x40 #I2C address, use i2cdetect to validate this number
PCA9685_I2C_BUSNUM = 1 #None will auto detect, which is fine on the pi. But other platforms should specify the bus num.
#DRIVETRAIN
#These options specify which chasis and motor setup you are using. Most are using SERVO_ESC.
#DC_STEER_THROTTLE uses HBridge pwm to control one steering dc motor, and one drive wheel motor
#DC_TWO_WHEEL uses HBridge pwm to control two drive motors, one on the left, and one on the right.
#SERVO_HBRIDGE_PWM use ServoBlaster to output pwm control from the PiZero directly to control steering, and HBridge for a drive motor.
#STEERING
STEERING_CHANNEL = 0 #channel on the 9685 pwm board 0-15
STEERING_LEFT_PWM = 460 #pwm value for full left steering
STEERING_RIGHT_PWM = 290 #pwm value for full right steering
#THROTTLE
THROTTLE_CHANNEL = 1 #channel on the 9685 pwm board 0-15
THROTTLE_FORWARD_PWM = 500 #pwm value for max forward throttle
THROTTLE_STOPPED_PWM = 370 #pwm value for no movement
THROTTLE_REVERSE_PWM = 220 #pwm value for max reverse throttle
#Camera
IMAGE_W = 224
IMAGE_H = 224
IMAGE_DEPTH = 3 # default RGB=3, make 1 for mono
CAMERA_FRAMERATE = DRIVE_LOOP_HZ
CAMERA_VFLIP = False
CAMERA_HFLIP = False
# For CSIC camera - If the camera is mounted in a rotated position, changing the below parameter will correct the output frame orientation
CSIC_CAM_GSTREAMER_FLIP_PARM = 0 # (0 => none , 4 => Flip horizontally, 6 => Flip vertically)
# Region of interest cropping
# only supported in Categorical and Linear models.
# If these crop values are too large, they will cause the stride values to become negative and the model will not be valid.
ROI_CROP_TOP = 0 #the number of rows of pixels to ignore on the top of the image
ROI_CROP_BOTTOM = 0 #the number of rows of pixels to ignore on the bottom of the image
#Odometry
HAVE_ODOM = False # Do you have an odometer? Uses pigpio
#Intel T265
WHEEL_ODOM_CALIB = "calibration_odometry.json"
|
py | b401037237ad5042445606a4eb6082b205242d68 | import re, unittest
from unittest import mock
from unittest.mock import MagicMock
class Client(object):
def _come_to_a_bad_end(self):
pass
def wrap(self):
self._come_to_a_bad_end()
class ClientTests(unittest.TestCase):
@mock.patch.object(Client, '_come_to_a_bad_end', autospec=True)
def test_client_start2(self, *mocks):
client = Client()
master = MagicMock()
        # use a loop name that does not shadow the imported `mock` module
        for patched in reversed(mocks):
            m = re.search('function (.+) at', str(patched))
            func_name = m[1]
            # print('func_name =', func_name)
            master.attach_mock(patched.mock, func_name)
client.wrap()
print('list(master.mock_calls) =', list(master.mock_calls))
print(master.mock_calls[0] == mocks[0])
print('mocks[0].mock == master.mock_calls[0] is ', mocks[0].mock == master.mock_calls[0])
# print(dir(master.mock_calls[0]))
# print(dir(mocks[0]))
print('------------------------------------------------')
print('mocks[0] =', mocks[0])
print('mocks[0].mock =', mocks[0].mock)
# print('mocks[0].mock.call =', mocks[0].mock.call)
print('mocks[0].mock_calls =', mocks[0].mock_calls)
print('type(mocks[0]) =', type(mocks[0]))
print('------------------------------------------------')
print('type(master.mock_calls[0]) =', type(master.mock_calls[0]))
print('master.mock_calls =', master.mock_calls)
print('master.mock_calls[0] =', master.mock_calls[0])
print('master.mock_calls[0].mock =', master.mock_calls[0].mock)
print('------------------------------------------------')
print('888 mocks[0].mock_calls[0] == master.mock_calls[0] is', mocks[0].mock_calls[0] == master.mock_calls[0])
# ------------------------------------------------
# mocks[0].mock_calls = [call(<__main__.Client object at 0x103248ef0>),
# call.__eq__(call._come_to_a_bad_end(<__main__.Client object at 0x103248ef0>)),
# call.__str__()]
# type(mocks[0]) = <class 'function'>
# ------------------------------------------------
# type(master.mock_calls[0]) = <class 'unittest.mock._Call'>
# master.mock_calls = [call._come_to_a_bad_end(<__main__.Client object at 0x103248ef0>),
# call._come_to_a_bad_end.__eq__(call._come_to_a_bad_end(<__main__.Client object at 0x103248ef0>)),
# call._come_to_a_bad_end.__str__()]
# master.mock_calls[0] = call._come_to_a_bad_end(<__main__.Client object at 0x103248ef0>)
# master.mock_calls[0].mock = mock
if __name__ == '__main__':
unittest.main()
|
py | b40103c2847dde6c46b47ffdc81c65586e984815 | # https://github.com/andrewliao11/gail-tf
# import lib.tf_util as U
import tensorflow as tf
import numpy as np
import lib.layer as layer
# ================================================================
# Flat vectors
# ================================================================
def var_shape(x):
out = x.get_shape().as_list()
assert all(isinstance(a, int) for a in out), \
"shape function assumes that shape is fully known"
return out
def numel(x):
return intprod(var_shape(x))
def intprod(x):
return int(np.prod(x))
def flatgrad(loss, var_list, clip_norm=None):
grads = tf.gradients(loss, var_list)
if clip_norm is not None:
grads = [tf.clip_by_norm(grad, clip_norm=clip_norm) for grad in grads]
return tf.concat(axis=0, values=[
tf.reshape(grad if grad is not None else tf.zeros_like(v), [numel(v)])
for (v, grad) in zip(var_list, grads)
])
# ================================================================
# logit_bernoulli_entropy
# ================================================================
def logsigmoid(a):
'''Equivalent to tf.log(tf.sigmoid(a))'''
return -tf.nn.softplus(-a)
def logit_bernoulli_entropy(logits):
ent = (1. - tf.nn.sigmoid(logits)) * logits - logsigmoid(logits)
return ent
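
# Why this expression equals the Bernoulli entropy (derivation added for clarity):
# with p = sigmoid(x), the entropy is H(p) = -p*log(p) - (1-p)*log(1-p).
# Since log(p) = logsigmoid(x) and log(1-p) = logsigmoid(x) - x (because
# x = log(p / (1-p))), substitution gives
#   H = -p*logsigmoid(x) - (1-p)*(logsigmoid(x) - x)
#     = (1-p)*x - logsigmoid(x),
# which is exactly (1 - sigmoid(x))*x + softplus(-x), as computed above.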
# ================================================================
# Discriminator
# ================================================================
class TransitionClassifier(object):
def __init__(self, sess, hidden_size, input_size, output_size,
use_norm, pop_batch_norm, entcoeff=0.001, lr_rate=1e-3, scope="adversary"):
self.scope = scope
self.observation_shape = (input_size,) # env.observation_space.shape
self.actions_shape = (output_size,) # env.action_space.shape
self.hidden_size = hidden_size
self.lr = lr_rate
self.sess = sess
self.use_norm = use_norm
self.pop_batch_norm = pop_batch_norm
self.build_ph()
        # Build graph
generator_logits = self.build_graph(self.generator_obs_ph, self.generator_acs_ph, reuse=False)
expert_logits = self.build_graph(self.expert_obs_ph, self.expert_acs_ph, reuse=True)
# Build accuracy
generator_acc = tf.reduce_mean(tf.to_float(tf.nn.sigmoid(generator_logits) < 0.5))
expert_acc = tf.reduce_mean(tf.to_float(tf.nn.sigmoid(expert_logits) > 0.5))
# Build regression loss
# let x = logits, z = targets.
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
generator_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=generator_logits, labels=tf.zeros_like(generator_logits))
generator_loss = tf.reduce_mean(generator_loss)
expert_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=expert_logits, labels=tf.ones_like(expert_logits))
expert_loss = tf.reduce_mean(expert_loss)
# Build entropy loss
logits = tf.concat([generator_logits, expert_logits], 0)
entropy = tf.reduce_mean(logit_bernoulli_entropy(logits))
entropy_loss = -entcoeff * entropy
# Loss + Accuracy terms
self.losses = [generator_loss, expert_loss, entropy, entropy_loss, generator_acc, expert_acc]
self.loss_name = ["generator_loss", "expert_loss", "entropy", "entropy_loss", "generator_acc", "expert_acc"]
self.total_loss = generator_loss + expert_loss + entropy_loss
# Build Reward for policy
self.reward_op = -tf.log(1 - tf.nn.sigmoid(generator_logits) + 1e-8)
#var_list = self.get_trainable_variables()
# self.lossandgrad = U.function([self.generator_obs_ph, self.generator_acs_ph, self.expert_obs_ph, self.expert_acs_ph],
# self.losses + [flatgrad(self.total_loss, var_list)])
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope=self.scope)
with tf.control_dependencies(update_ops):
self.train_step = tf.train.AdamOptimizer(self.lr).minimize(self.total_loss)
def build_ph(self):
self.generator_obs_ph = tf.placeholder(tf.float32, (None, ) + self.observation_shape, name="observations_ph")
self.generator_acs_ph = tf.placeholder(tf.float32, (None, ) + self.actions_shape, name="actions_ph")
self.expert_obs_ph = tf.placeholder(tf.float32, (None, ) + self.observation_shape, name="expert_observations_ph")
self.expert_acs_ph = tf.placeholder(tf.float32, (None, ) + self.actions_shape, name="expert_actions_ph")
def build_graph(self, obs_ph, acs_ph, reuse=False):
with tf.variable_scope(self.scope):
if reuse:
tf.get_variable_scope().reuse_variables()
data = tf.concat([obs_ph, acs_ph], axis=1) # concatenate the two input -> form a transition
'''p_h1 = tf.contrib.layers.fully_connected(_input, self.hidden_size, activation_fn=tf.nn.relu)
p_h2 = tf.contrib.layers.fully_connected(p_h1, self.hidden_size, activation_fn=tf.nn.relu)
logits = tf.contrib.layers.fully_connected(p_h2, 1, activation_fn=tf.identity)'''
if self.use_norm:
data = layer.batch_norm(data, self.pop_batch_norm, 'BN')
d1 = layer.dense_layer(data, 128, "DenseLayer1", is_training=self.pop_batch_norm, trainable=True,
norm=self.use_norm)
d2 = layer.dense_layer(d1, 32, "DenseLayer2", is_training=self.pop_batch_norm, trainable=True,
norm=self.use_norm)
dout = layer.dense_layer(d2, 1, "DenseLayer3", func=None,
is_training=self.pop_batch_norm, trainable=True, norm=None)
return dout
def get_trainable_variables(self):
return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope)
def train(self, ob_batch, ac_batch, ob_expert, ac_expert):
feed_dict = {self.generator_obs_ph: ob_batch,
self.generator_acs_ph: ac_batch,
self.expert_obs_ph: ob_expert,
self.expert_acs_ph: ac_expert,
self.pop_batch_norm: True,
}
train_op = [self.train_step] + self.losses
result = self.sess.run(train_op, feed_dict=feed_dict)
print('train result:', result)
def get_reward(self, obs, acs):
if len(obs.shape) == 1:
obs = np.expand_dims(obs, 0)
if len(acs.shape) == 1:
acs = np.expand_dims(acs, 0)
feed_dict = {self.generator_obs_ph: obs,
self.generator_acs_ph: acs,
self.pop_batch_norm: False,
}
reward = self.sess.run(self.reward_op, feed_dict)
return reward
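
# Usage sketch (hypothetical, TF1-style; the dimensions, batch sizes, and placeholder
# wiring below are illustrative assumptions, not values taken from this repository).
# The discriminator is trained on generator and expert transition batches, and its
# reward_op then provides the imitation reward for the policy update.
if __name__ == "__main__":
    obs_dim, act_dim = 8, 2
    sess = tf.Session()
    is_training_ph = tf.placeholder(tf.bool, name="pop_batch_norm")
    disc = TransitionClassifier(sess, hidden_size=128, input_size=obs_dim,
                                output_size=act_dim, use_norm=False,
                                pop_batch_norm=is_training_ph)
    sess.run(tf.global_variables_initializer())
    ob = np.random.randn(16, obs_dim).astype(np.float32)
    ac = np.random.randn(16, act_dim).astype(np.float32)
    disc.train(ob, ac, ob, ac)          # here the same batch stands in for both generator and expert data
    print("rewards:", disc.get_reward(ob, ac)[:3])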
|
py | b40103caf8988ddfbed90cd4e09cf1545af980d2 | import subprocess
import sys
py_path = '../../scripts/'
sys.path.insert(0, py_path)
import utils as ut
from colors import *
#
# Compile and run all the agent class specific tests
#
# Compile
subprocess.call('python3.6 compilation.py', shell=True)
# Test suite 1
ut.msg('Flu class functionality tests', CYAN)
subprocess.call('./flu_test', shell=True)
|
py | b401049ce1e3865fe74ef6bf5def5ceac7ab53a7 | TRAIN_PATH = 'fnc_pkl/train_datapoints.pkl'
TEST_PATH = 'fnc_pkl/test_datapoints.pkl'
TRAIN_CSV = {
'stances' : 'fnc-1/train_stances.csv',
'bodies' : 'fnc-1/train_bodies.csv',
}
TEST_CSV = {
'stances' : 'fnc-1/competition_test_stances.csv',
'bodies' : 'fnc-1/competition_test_bodies.csv',
}
LEN_HEADLINE = 15
LEN_BODY = 60
TRAIN_CFG = {
'BATCH_SIZE' : 32,
'N_EPOCHS' : 5,
'WEIGHTS_PATH' : 'model_chkpts/cond_cnn_classif',
'PATIENCE' : 1,
'LR' : 0.001,
'LR_DECAY_STEPS' : 10,
'LR_DECAY_GAMMA' : 0.1,
'COND_AGGRESSION_LAMBDA' : 2,
}
DATA_CFG = {
'MAX_VOCAB_SIZE' : 40000,
'VECTORS': 'glove.6B.100d',
'VECTOR_SIZE' : 100,
}
VANILLA_COND_CNN_NET_CFG = {
'h_num_filt' : 256,
'h_n_list' : [2,3,4],
'b_num_filt' : 192,
'b_n_list' : [2,3,4,5],
'num_classes' : None, # To fill dynamically
'dropout_rate' : 0.4,
}
POS_TAGGED_COND_CNN_NET_CFG = {
'h_num_filt' : 256,
'h_n_list' : [1,2,3],
'b_num_filt' : 256,
'b_n_list' : [1,2,3],
'num_classes' : None, # To fill dynamically
'dropout_rate' : 0.4,
}
SHARED_CONV_VANILLA_COND_CNN_NET_CFG = {
'num_filt' : 256,
'n_list' : [2,3,4],
'num_classes' : None, # To fill dynamically
'dropout_rate' : 0.4,
}
SHARED_CONV_POS_TAGGED_COND_CNN_NET_CFG = {
'num_filt' : 256,
'n_list' : [1,2,3],
'num_classes' : None, # To fill dynamically
'dropout_rate' : 0.4,
}
EMBED_CFG = {
'H_V' : None, # To fill dynamically
'B_V' : None, # To fill dynamically
'D' : DATA_CFG['VECTOR_SIZE']
}
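
# Sketch of how the "To fill dynamically" fields above are typically populated before
# model construction (hypothetical helper; the real vocab and label objects come from
# the data pipeline and are not part of this config file).
def fill_dynamic_cfg(net_cfg, embed_cfg, num_classes, headline_vocab_size, body_vocab_size):
    net_cfg['num_classes'] = num_classes        # e.g. 4 for agree/disagree/discuss/unrelated
    embed_cfg['H_V'] = headline_vocab_size      # headline vocabulary size
    embed_cfg['B_V'] = body_vocab_size          # body vocabulary size
    return net_cfg, embed_cfg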
|