metadata (dict) | text (string, lengths 60 to 3.49M)
---|---
{
"source": "jernejvivod/skrelief",
"score": 3
}
|
#### File: skrelief/skrelief/turf.py
```python
import numpy as np
from scipy.stats import rankdata
from sklearn.base import BaseEstimator, TransformerMixin
from julia import Julia
Julia(compiled_modules=False)
from julia import Relief as Relief_jl
from skrelief.relieff import Relieff
class TuRF(BaseEstimator, TransformerMixin):
"""sklearn compatible implementation of the TuRF algorithm.
Reference:
<NAME> and <NAME>. Tuning ReliefF for genome-wide
genetic analysis. In <NAME>, <NAME>, and <NAME>.
Rajapakse, editors, Evolutionary Computation, Machine Learning and
Data Mining in Bioinformatics, pages 166–175. Springer, 2007.
Args:
n_features_to_select (int): number of features to select from dataset.
num_it (int): number of iterations.
rba (object): feature weighting algorithm wrapped by the TuRF algorithm. If equal
to None, the default ReliefF RBA implemented in Julia is used.
Attributes:
n_features_to_select (int): number of features to select from dataset.
num_it (int): number of iterations.
_rba (object): feature weighting algorithm wrapped by the TuRF algorithm.
"""
def __init__(self, n_features_to_select=10, num_it=10, rba=None):
self.n_features_to_select = n_features_to_select
self.num_it = num_it
self._rba = rba
def fit(self, data, target):
"""
Rank features using TuRF feature selection algorithm
Args:
data (numpy.ndarray): matrix of data samples
target (numpy.ndarray): vector of target values of samples
Returns:
(object): reference to self
"""
# Compute feature weights and rank.
if self._rba is not None:
# Pass the user-supplied RBA to the Julia TuRF implementation.
self.weights = Relief_jl.turf(data, target, self.num_it, self._rba)
else:
self.weights = Relief_jl.turf(data, target, self.num_it)
self.rank = rankdata(-self.weights, method='ordinal')
# Return reference to self.
return self
def transform(self, data):
"""
Perform feature selection using computed feature ranks.
Args:
data (numpy.ndarray): matrix of data samples on which to perform feature selection.
Returns:
(numpy.ndarray): result of performing feature selection.
"""
# select n_features_to_select best features and return selected features.
msk = self.rank <= self.n_features_to_select # Compute mask.
return data[:, msk] # Perform feature selection.
```
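A minimal usage sketch for the wrapper above, assuming PyJulia and the Relief.jl package are installed and importable and that `skrelief` is on the path; the data here are synthetic and for illustration only.
```python
import numpy as np
from skrelief.turf import TuRF  # assuming skrelief is installed / on sys.path

# Synthetic data: 100 samples, 20 features, binary target (illustration only).
rng = np.random.RandomState(0)
data = rng.rand(100, 20)
target = (data[:, 0] + data[:, 3] > 1.0).astype(int)

turf = TuRF(n_features_to_select=5, num_it=10)
turf.fit(data, target)            # computes turf.weights and turf.rank via Relief.jl
data_sel = turf.transform(data)   # keeps the 5 top-ranked features
print(data_sel.shape)             # expected: (100, 5)
```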
|
{
"source": "jernej-vrscaj/Hello_IoT",
"score": 3
}
|
#### File: Hello_IoT/script/Hello_IoT_CGI_notify.py
```python
print('Content-Type: text/html \n')
# Import modules #
from bluepy import btle
import cgi
import time
import struct
import sys
import os
# Brief: Callback class for notification events
# Input args: # valHandle: Tuple of characteristic handle values
# # fahr: FORMs fahrenheit value
# # v_list: List of characteristic values [temperature, humidity, pressure]
# Return val: # None
#
class MyDelegate(btle.DefaultDelegate):
def __init__(self, valHandle, fahr, v_list):
btle.DefaultDelegate.__init__(self)
self.__envHandle = valHandle
self.__fahr = fahr
self.__vList = v_list
def handleNotification(self, cHandle, data):
if cHandle == self.__envHandle[0]:
cnv_data = struct.unpack('h', data)
if self.__fahr == 'Fahrenheit':
self.__vList[0] = 'T = {0:.2f} °F'.format((cnv_data[0]*0.01)*1.8 + 32.0)
else:
self.__vList[0] = 'T = {0:.2f} °C'.format(cnv_data[0]*0.01)
elif cHandle == self.__envHandle[1]:
cnv_data = struct.unpack('H', data)
self.__vList[1] = 'H = {0:.2f} %'.format(cnv_data[0]*0.01)
elif cHandle == self.__envHandle[2]:
cnv_data = struct.unpack('I', data)
self.__vList[2] = 'P = {0:.2f} hPa'.format(cnv_data[0]*0.001)
####
# Brief: Save environmental values to .txt file
# Input args: # v_list: List of characteristic values [temperature, humidity, pressure]
# Return val: # None
#
def save_to_textf(v_list):
# Append on open #
try:
fp = open('env_val.txt', 'a', encoding='utf8')
except IOError as io_exp:
display_cgi_page_err(io_exp)
sys.exit()
f_data = v_list[0] + ',' + v_list[1] + ',' + v_list[2] + '\n'
fp.write(f_data)
if not fp.closed:
fp.close()
####
# Brief: Error HTML page
# Input args: # exp: Raised exception
# Return val: # None
#
def display_cgi_page_err(exp):
print('''
<!DOCTYPE html>
<html>
<head>
<title>Error</title>
<script>
</script>
<style>
html {
background: url(/Nature___Sundown_Golden_sunset_above_the_clouds_042961_23.jpg) no-repeat center fixed;
background-size: cover;
background-color: rgba(128, 128, 128, 0.4); /* Used if the image is unavailable */
height: 100%;
width: 100%;
}
.myfont {
color: white;
text-shadow: 1px 1px rgba(0, 0, 0, 1);
font-family: Trebuchet MS, Helvetica, sans-serif;
}
</style>
</head>
<body>
''')
print('<p class="myfont" style="font-size: 200%;">Error:',exp,'</p>')
print('''
</body>
</html>
''')
####
# Brief: Read characteristic values
# Input args: # sensor: BLE peripheral object
# # fahr: FORMs fahrenheit value
# # v_list: List of characteristic values [temperature, humidity, pressure]
# Return val: # None
#
def read_ch_values(sensor, fahr, v_list):
# Environmental service #
uuid_svc_env = btle.UUID('0000181a-0000-1000-8000-00805f9b34fb')
svc_env = sensor.getServiceByUUID(uuid_svc_env)
# Before reading data, wait some time for sensors to do their readings
# and update characteristic values #
time.sleep(0.2)
# Read data from Temperature characteristic #
uuid_ch_temp = btle.UUID('00002a6e-0000-1000-8000-00805f9b34fb')
ch_temp = svc_env.getCharacteristics(uuid_ch_temp)[0]
val_temp = ch_temp.read()
temp_tuple = struct.unpack('h', val_temp)
if fahr == 'Fahrenheit':
v_list[0] = 'T = {0:.2f} °F'.format((temp_tuple[0]*0.01)*1.8 + 32.0)
else:
v_list[0] = 'T = {0:.2f} °C'.format(temp_tuple[0]*0.01)
# Read data from Humidity characteristic #
uuid_ch_humd = btle.UUID('00002a6f-0000-1000-8000-00805f9b34fb')
ch_humd = svc_env.getCharacteristics(uuid_ch_humd)[0]
val_humd = ch_humd.read()
humd_tuple = struct.unpack('H', val_humd)
v_list[1] = 'H = {0:.2f} %'.format(humd_tuple[0]*0.01)
# Read data from Pressure characteristic #
uuid_ch_press = btle.UUID('00002a6d-0000-1000-8000-00805f9b34fb')
ch_press = svc_env.getCharacteristics(uuid_ch_press)[0]
val_press = ch_press.read()
press_tuple = struct.unpack('I', val_press)
v_list[2] = 'P = {0:.2f} hPa'.format(press_tuple[0]*0.001)
####
# Brief: Read characteristic notification values
# Input args: # sensor: BLE peripheral object
# # fahr: FORMs fahrenheit value
# # UPDT_INT: Sensor values update interval in connection mode, in seconds
# # v_list: List of characteristic values [temperature, humidity, pressure]
# Return val: # None
#
def read_ntfcn_values(sensor, fahr, UPDT_INT, v_list):
read_ch_values(sensor, fahr, v_list)
save_to_textf(v_list)
# Environmental service #
uuid_svc_env = btle.UUID('0000181a-0000-1000-8000-00805f9b34fb')
svc_env = sensor.getServiceByUUID(uuid_svc_env)
# Setup to turn notifications ON #
# Temperature char #
uuid_ch_temp = btle.UUID('00002a6e-0000-1000-8000-00805f9b34fb')
ch_temp = svc_env.getCharacteristics(uuid_ch_temp)[0]
# Humidity char #
uuid_ch_humd = btle.UUID('00002a6f-0000-1000-8000-00805f9b34fb')
ch_humd = svc_env.getCharacteristics(uuid_ch_humd)[0]
# Pressure char #
uuid_ch_press = btle.UUID('00002a6d-0000-1000-8000-00805f9b34fb')
ch_press = svc_env.getCharacteristics(uuid_ch_press)[0]
# Tuple of characteristic handle values #
ch_env_hnd = ch_temp.valHandle, ch_humd.valHandle, ch_press.valHandle
# Set callback object for notification events #
sensor.setDelegate(MyDelegate(ch_env_hnd, fahr, v_list))
# Temperature notification ON #
sensor.writeCharacteristic(ch_temp.valHandle+1, b'\x01\x00')
# Humidity notification ON #
sensor.writeCharacteristic(ch_humd.valHandle+1, b'\x01\x00')
# Pressure notification ON #
sensor.writeCharacteristic(ch_press.valHandle+1, b'\x01\x00')
while True:
try:
if sensor.waitForNotifications(UPDT_INT):
save_to_textf(v_list)
except KeyboardInterrupt:
sys.exit()
except btle.BTLEDisconnectError as btle_exp:
display_cgi_page_err(btle_exp)
sys.exit()
# Other #
except Exception as exp:
display_cgi_page_err(exp)
sys.exit()
####
# Brief: Main
# Input args: # None
# Return val: # None
#
def main():
# Read values from the FORM #
formdata = cgi.FieldStorage()
fahrenheit = formdata.getvalue('fahrenheit')
# Sensor values update interval in connection mode, in seconds #
UPDT_INT = 1.0
# Environmental values #
temperature = ''
humidity = ''
pressure = ''
val_list = [temperature, humidity, pressure]
# Directory for the .txt file #
os.chdir('/var/www/html/')
# Clear content on open #
try:
fp = open('env_val.txt', 'w', encoding='utf8')
except IOError as io_exp:
display_cgi_page_err(io_exp)
sys.exit()
if not fp.closed:
fp.close()
# Connect to peripheral #
try:
periph = btle.Peripheral('D0:65:F1:9B:08:4B', btle.ADDR_TYPE_RANDOM)
except btle.BTLEDisconnectError as btle_exp:
display_cgi_page_err(btle_exp)
sys.exit()
# Only start reading notifications if the connection succeeded #
else:
read_ntfcn_values(periph, fahrenheit, UPDT_INT, val_list)
####
if __name__ == "__main__":
main()
```
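The handlers above follow the standard GATT encodings for the Environmental Sensing characteristics: temperature (0x2A6E) is a signed 16-bit value in units of 0.01 °C, humidity (0x2A6F) an unsigned 16-bit value in units of 0.01 %, and pressure (0x2A6D) an unsigned 32-bit value in units of 0.1 Pa, which is why the script multiplies by 0.001 to get hPa. A small standalone sketch of the same decoding, with made-up raw payloads (the helper is hypothetical, not part of the script):
```python
import struct

def decode_env(temp_raw, humd_raw, press_raw, fahrenheit=False):
    """Decode raw Environmental Sensing payloads (hypothetical helper)."""
    temp_c = struct.unpack('<h', temp_raw)[0] * 0.01       # sint16, 0.01 degC per LSB
    humd = struct.unpack('<H', humd_raw)[0] * 0.01         # uint16, 0.01 % per LSB
    press_hpa = struct.unpack('<I', press_raw)[0] * 0.001  # uint32, 0.1 Pa per LSB -> hPa
    temp = temp_c * 1.8 + 32.0 if fahrenheit else temp_c
    return temp, humd, press_hpa

# Example payloads corresponding to 23.45 degC, 41.20 % and 1013.25 hPa.
print(decode_env(struct.pack('<h', 2345),
                 struct.pack('<H', 4120),
                 struct.pack('<I', 1013250)))
```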
|
{
"source": "jernelv/SpecAnalysis",
"score": 3
}
|
#### File: modules/libs/PLSRlib.py
```python
import numpy as np
import scipy
import scipy.linalg
nA = np.newaxis  # shorthand used below to add a new axis when slicing
def Der(x,y):
"""Function for finding first derivative of spectral data. Uses finite differences."""
n=len(x)
x2=np.zeros(n-1)
y2=np.zeros(n-1)
for i in range(n-1):
x2[i]=0.5*(x[i]+x[i+1])
y2[i]=(y[i+1]-y[i])/(x[i+1]-x[i])
return(x2,y2)
def Der2(x,y):
"""Function for finding second derivative of spectral data. Uses finite differences."""
n=len(x)
x2=np.zeros(n-2)
y2=np.zeros(n-2)
dx2=(x[1]-x[0])**2 # assumed constant
for i in range(n-2):
x2[i]=x[i+1]
y2[i]=(y[i]-2*y[i+1]+y[i+2])/dx2
return(x2,y2)
def mlr(x,y,order):
"""Multiple linear regression fit of the columns of matrix x
(dependent variables) to constituent vector y (independent variables)
order - order of a smoothing polynomial, which can be included
in the set of independent variables. If order is
not specified, no background will be included.
b - fit coeffs
f - fit result (m x 1 column vector)
r - residual (m x 1 column vector)
"""
if order > 0:
s=scipy.ones((len(y),1))
for j in range(order):
s=scipy.concatenate((s,(scipy.arange(0,1+(1.0/(len(y)-1))-0.5/(len(y)-1),1.0/(len(y)-1))**j)[:,nA]),1)
X=scipy.concatenate((x, s),1)
else:
X = x
b = scipy.dot(scipy.dot(scipy.linalg.pinv(scipy.dot(scipy.transpose(X),X)),scipy.transpose(X)),y)
f = scipy.dot(X,b)
r = y - f
return b,f,r
def emsc(case, order, fit=None):
"""Extended multiplicative scatter correction
case - spectral data for background correction
order - order of polynomial
fit - if None then use average spectrum, otherwise provide a spectrum
as a column vector to which all others fitted
corr - EMSC corrected data
mx - fitting spectrum
"""
if not type(fit)==type(None):
mx = fit
else:
mx = scipy.mean(case,axis=0)[:,nA]
corr = scipy.zeros(case.shape)
for i in range(len(case)):
b,f,r = mlr(mx, case[i,:][:,nA], order)
corr[i,:] = scipy.reshape((r/b[0,0]) + mx, (corr.shape[1],))
corr=np.nan_to_num(corr)
return corr
def baseline_corr(case):
"""Baseline correction that sets the first independent variable of each
spectrum to zero."""
size = case.shape
subtract = scipy.transpose(scipy.resize(scipy.transpose(case[:,0]),(size[1],size[0])))
return (case-subtract)
def baseline_avg(case):
"""Baseline correction that subtracts an average of the first and last
independent variable from each variable."""
size = case.shape
subtract = scipy.transpose(scipy.resize(scipy.transpose((case[:,0]+case[:,size[1]-1])/2),(size[1],size[0])))
return (case-subtract)
def baseline_linear(case):
"""Baseline correction that subtracts a linearly increasing baseline between
the first and last independent variable."""
size, t = case.shape, 0
subtract = scipy.zeros((size[0],size[1]), 'd')
while t < size[0]:
a = case[t,0]
b = case[t,size[1]-1]
div = (b-a)/size[1]
if div == 0:
div = 1
arr = scipy.arange(a,b,div,'d')
subtract[t,:] = scipy.resize(arr,(size[1],))
t = t+1
return case-subtract
```
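A quick sanity check for the finite-difference helpers above, run on a synthetic quadratic whose derivatives are known analytically; the import path is an assumption based on the repository layout.
```python
import numpy as np
from modules.libs.PLSRlib import Der, Der2  # assuming the repository root is on sys.path

x = np.linspace(0.0, 10.0, 101)
y = 3.0 * x**2 + 2.0 * x + 1.0   # y' = 6x + 2, y'' = 6

x_mid, dy = Der(x, y)            # first derivative, evaluated at interval midpoints
x_in, d2y = Der2(x, y)           # second derivative, evaluated at interior points

print(np.allclose(dy, 6.0 * x_mid + 2.0))  # True: exact for a quadratic
print(np.allclose(d2y, 6.0))               # True: constant second derivative
```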
#### File: modules/libs/PLSRwavelengthSelection.py
```python
import numpy as np
import fns
from . import PLSRregressionMethods
from . import PLSRsave
import tkinter
import copy
import sklearn.model_selection
import types
from . import PLSRclassifiers
def get_buttons():
buttons=[
{'key': 'RNNtab2name', 'type': 'tabname', 'text': 'Wavelength Selection', 'tab': 2} ,
{'key': 'RegressionL3', 'type': 'label', 'text': 'Type of wavelength selection:', 'tab': 2, 'row': 2} ,
{'key': 'regression_wavelength_selection', 'type': 'radio:vertical:text', 'texts': ['No wavelength selection', 'Moving Window', 'Genetic Algorithm','Sequential Feature Selector'], 'tab': 2, 'row': 3} ,
{'key': 'moving_window_min', 'type': 'txt:float', 'text': 'Min window', 'default': '30', 'width': 4, 'tab': 2, 'row': 4} ,
{'key': 'moving_window_max', 'type': 'txt:float', 'text': 'Max window', 'default': '100', 'width': 4, 'tab': 2, 'row': 4} ,
{'key': 'RegressionL4', 'type': 'label', 'text': 'GA options ', 'tab': 2, 'row': 5} ,
{'key': 'GA_number_of_individuals', 'type': 'txt:int', 'text': 'GA num. Individuals', 'default': '100', 'width': 4, 'tab': 2, 'row': 5} ,
{'key': 'GA_crossover_rate', 'type': 'txt:float', 'text': 'GA crossover rate', 'default': '0.8', 'width': 4, 'tab': 2, 'row': 5} ,
{'key': 'GA_mutation_rate', 'type': 'txt:float', 'text': 'GA mutation rate', 'default': '0.001', 'width': 6, 'tab': 2, 'row': 5} ,
{'key': 'GA_max_number_of_generations', 'type': 'txt:int', 'text': 'GA generations', 'default': '20', 'width': 3, 'tab': 2, 'row': 5} ,
{'key': 'SFS type', 'type': 'radio:text', 'texts': ['Forward', 'Backward'], 'tab': 2, 'row': 6} ,
{'key': 'SFS_floating', 'type': 'check', 'text': 'Floating', 'tab': 2, 'row': 6} ,
{'key': 'SFS_num_after_min', 'type': 'txt:int', 'text': 'Iterations after min', 'default': '30', 'width': 4, 'tab': 2, 'row': 6 },
{'key': 'SFS_target', 'type': 'txt:int', 'text': 'Target number', 'default': '20', 'width': 4, 'tab': 2, 'row': 6 },
{'key': 'SFS_max_iterations', 'type': 'txt:int', 'text': 'Max iterations', 'default': '300', 'width': 4, 'tab': 2, 'row': 6 },
{'key': 'WS_loss_type', 'type': 'radio:text', 'texts': ['X-validation on training', 'RMSEC on training', 'RMSEP on validation'], 'tab': 2, 'row': 8} ,
{'key': 'WS_cross_val_N', 'type': 'txt:int', 'text': 'WS cross val fold', 'default': '1', 'width': 4, 'tab': 2, 'row': 9} ,
{'key': 'WS_cross_val_max_cases', 'type': 'txt:int', 'text': 'WS cross val num cases', 'default': '-1', 'width': 4, 'tab': 2, 'row': 9} ,
]
return buttons
def MW(case,ui,common_variables,keywords={}):
T=case.T
V=case.V
wavenumbers=case.wavenumbers
folder=case.folder
try:
keywords=case.keywords
except:
keywords={}
WS_getCrossvalSplits([0,1],T,V,ui,use_stored=False)
# get regression module
reg_module=PLSRregressionMethods.getRegModule(ui['reg_type'],keywords)
# Set what datapoints to include, the parameter 'wavenum' is in units cm^-1
if ui['save_check_var']:
common_variables.tempax.fig=common_variables.tempfig
#len_wavenumbers=len(wavenumbers)
dw=wavenumbers[0]-wavenumbers[1]
# Windowsize is input in cm^-1, transform to indexes
MWmax=int(round(ui['moving_window_max']/abs(dw),0))
MWmin=int(round(ui['moving_window_min']/abs(dw),0))
Wresults=np.zeros((len(wavenumbers),MWmax+1-MWmin))
Wsizes=np.arange(MWmin,MWmax+1)
# do moving window
for i,Wsize in enumerate(Wsizes):
trail_active_wavenumbers=[]
for j, Wcenter in enumerate(wavenumbers):
Wstart=j-Wsize//2
Wend=Wstart+Wsize
#if Wsize < MWmax+1 and i < len(wavenumbers)+1:
if Wstart<0:
k=j
continue
elif Wend>len(wavenumbers):
l=j
break
else:
trail_active_wavenumbers.append(np.arange(Wstart,Wend))
#Wresults[j,i]=WS_getRMSEP(reg_module,trail_active_wavenumbers[-1],T,V,use_stored=False)
print('moving window row '+str(i)+' of '+str(len(Wsizes)))
Wresults[k+1:l,i], _ = WS_evaluate_chromosomes(reg_module,
T, V, trail_active_wavenumbers,
use_stored=True)
# done moving window
Wresults=Wresults+(Wresults==0)*np.max(Wresults) # set empty datapoints to max value
j,i=np.unravel_index(Wresults.argmin(), Wresults.shape)
bestVal=Wresults[j,i]
bestSize=Wsizes[i]
bestStart=j-bestSize//2
# plot MWresults
Wresults=np.array(Wresults)
# make plot
Wwindowsize,Wwavenumbers = np.meshgrid(Wsizes*abs(dw), wavenumbers)
unique_keywords=PLSRsave.get_unique_keywords_formatted(common_variables.keyword_lists,keywords)
PLSRsave.PcolorMW(Wwavenumbers,Wwindowsize,Wresults,fns.add_axis(common_variables.fig,ui['fig_per_row'],ui['max_plots']),unique_keywords[1:],ui)
if ui['save_check_var']:
tempCbar=PLSRsave.PcolorMW(Wwavenumbers,Wwindowsize,Wresults,common_variables.tempax,unique_keywords[1:],ui)
common_variables.tempfig.subplots_adjust(bottom=0.13,left=0.15, right=0.97, top=0.9)
plotFileName=folder+ui['reg_type']+unique_keywords.replace('.','p')+'_moving_window'
common_variables.tempfig.savefig(plotFileName+ui['file_extension'])
tempCbar.remove()
# set result as keywords, so that they are saved
bestEnd=bestStart+bestSize
Wwidth=wavenumbers[bestStart]-wavenumbers[bestEnd-1] #cm-1
Wcenter=0.5*(wavenumbers[bestStart]+wavenumbers[bestEnd-1]) #cm-1
keywords['MW width']=str(round(Wwidth,1))+r' cm$^{-1}$'
keywords['MW center']=str(round(Wcenter,1))+r' cm$^{-1}$'
# prepare return vector
active_wavenumers=np.zeros(len(wavenumbers), dtype=bool)
active_wavenumers[bestStart:bestEnd]=True
return active_wavenumers
def WS_evaluate_chromosomes(reg_module,T,V,trail_active_wavenumbers,ui=None,use_stored=False,backup_reg_module=None):
used_mlr=False
losses=np.zeros(len(trail_active_wavenumbers))
for i,active_wavenumers in enumerate(trail_active_wavenumbers):
#print(,i,' of ',len(active_wavenumers))
#i+=1
try:
losses[i]=WS_getRMSEP(reg_module,active_wavenumers,T,V,ui=ui,use_stored=use_stored)
except:
used_mlr=True
losses[i]=WS_getRMSEP(backup_reg_module,active_wavenumers,T,V,ui=ui,use_stored=use_stored)
return losses, used_mlr
def WS_getRMSEP(reg_module,chromosome,T,V,ui=None,use_stored=False):
# ui is optional only if use_stored=True
Ts,Vs=WS_getCrossvalSplits(chromosome,T,V,ui=ui,use_stored=use_stored)
RMSEP=[]
percent_cor_classified_list=[]
for curT,curV in zip(Ts,Vs):
reg_module.fit(curT.X, curT.Y)
curV.pred = reg_module.predict(curV.X)[:,0]
if reg_module.type=='regression':
RMSEP.append(np.sqrt(np.sum((curV.pred-curV.Y)**2)/len(curV.Y)))
else: #reg_module.type=='classifier'
percent_cor_classified_list.append(PLSRclassifiers.get_correct_categorized(curV.pred,curV.Y))
if reg_module.type=='regression':
return np.sqrt(np.sum(np.array(RMSEP)**2)/len(RMSEP))
else:
return 1-np.average(percent_cor_classified_list)
def WS_getCrossvalSplits(chromosome,T,V,ui=None,use_stored=False):
global stored_XvalTs
global stored_XvalVs
if use_stored==True:
XvalTs = copy.deepcopy(stored_XvalTs)
XvalVs = copy.deepcopy(stored_XvalVs)
else:
XvalTs=[]
XvalVs=[]
if ui['WS_loss_type']=='X-validation on training':
if ui['WS_cross_val_N']==1 and ui['WS_cross_val_max_cases']==-1:
splitmodule=sklearn.model_selection.LeaveOneOut()
else:
splitmodule=sklearn.model_selection.ShuffleSplit(n_splits=ui['WS_cross_val_max_cases'], test_size=ui['WS_cross_val_N'])
for train,val in splitmodule.split(T.X):
XvalTs.append(types.SimpleNamespace())
XvalTs[-1].X=np.array(T.X[train])
XvalTs[-1].Y=np.array(T.Y[train])
XvalVs.append(types.SimpleNamespace())
XvalVs[-1].X=np.array(T.X[val])
XvalVs[-1].Y=np.array(T.Y[val])
elif ui['WS_loss_type']=='RMSEC on training':
XvalTs.append(copy.deepcopy(T))
XvalVs=XvalTs # pointer to object, no need to copy it
else:# ui['WS_loss_type']=='RMSEP on validation':
XvalTs.append(copy.deepcopy(T))
XvalVs.append(copy.deepcopy(V))
stored_XvalTs = copy.deepcopy(XvalTs)
stored_XvalVs = copy.deepcopy(XvalVs)
for T in XvalTs:
T.X=T.X[:,chromosome]
if len(XvalVs[0].X[0])>len(XvalTs[0].X[0]): # check whether V is a separate object from T; only then slice it as well
for V in XvalVs:
V.X=V.X[:,chromosome]
return XvalTs,XvalVs
```
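MW above slides windows of varying width across the spectrum and scores each window with the selected regression module. A stripped-down sketch of the same idea, using scikit-learn's PLSRegression and a plain training RMSE on synthetic data (no GUI, cross-validation or plotting; window widths are in points rather than cm^-1):
```python
import numpy as np
from sklearn.cross_decomposition import PLSRegression
from sklearn.metrics import mean_squared_error

rng = np.random.RandomState(0)
n_samples, n_points = 40, 200
X = rng.rand(n_samples, n_points)
# Only points 80-99 carry signal in this synthetic example.
y = X[:, 80:100].sum(axis=1) + 0.05 * rng.randn(n_samples)

best = (np.inf, None, None)  # (RMSE, window start, window size)
for w_size in (10, 20, 40):                       # candidate window widths
    for start in range(0, n_points - w_size, 5):  # slide the window across the spectrum
        cols = slice(start, start + w_size)
        pls = PLSRegression(n_components=2).fit(X[:, cols], y)
        rmse = mean_squared_error(y, pls.predict(X[:, cols]).ravel()) ** 0.5
        if rmse < best[0]:
            best = (rmse, start, w_size)

print('best window: start=%d, size=%d, RMSEC=%.3f' % (best[1], best[2], best[0]))
```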
#### File: modules/libs/signal_alignment.py
```python
import numpy as np
from scipy.optimize import minimize
from scipy.interpolate import interp1d
from scipy.ndimage.interpolation import shift
from statsmodels.tsa.stattools import ccovf
def chisqr_align(reference, target, roi, order=1, init=0.1, bound=1):
'''
Align a target signal to a reference signal within a region of interest (ROI)
by minimizing the chi-squared between the two signals. Depending on the shape
of your signals, providing a highly constrained prior may be necessary when using a
gradient-based optimization technique in order to avoid local minima.
Args:
reference (1d array/list): signal that won't be shifted
target (1d array/list): signal to be shifted to reference
roi (tuple): region of interest to compute chi-squared
order (int): order of spline interpolation for shifting target signal
init (int): initial guess to offset between the two signals
bound (int): symmetric bounds for constraining the shift search around initial guess
Returns:
shift (float): offset between target and reference signal
Todo:
* include uncertainties on spectra
* update chi-squared metric for uncertainties
* include loss function on chi-sqr
'''
# convert to int to avoid indexing issues
ROI = slice(int(roi[0]), int(roi[1]), 1)
# normalize ref within ROI
reference = reference/np.mean(reference[ROI])
# define objective function: returns the array to be minimized
def fcn2min(x):
shifted = shift(target,x,order=order)
shifted = shifted/np.mean(shifted[ROI])
return np.sum( ((reference - shifted)**2 )[ROI] )
# set up bounds for pos/neg shifts
minb = min( [(init-bound),(init+bound)] )
maxb = max( [(init-bound),(init+bound)] )
# minimize chi-squared between the two signals
result = minimize(fcn2min,init,method='L-BFGS-B',bounds=[ (minb,maxb) ])
return result.x[0]
def phase_align(reference, target, roi, res=100):
'''
Cross-correlate data within region of interest at a precision of 1./res
if data is cross-correlated at native resolution (i.e. res=1) this function
can only achieve integer precision
Args:
reference (1d array/list): signal that won't be shifted
target (1d array/list): signal to be shifted to reference
roi (tuple): region of interest over which to cross-correlate the signals
res (int): factor to increase resolution of data via linear interpolation
Returns:
shift (float): offset between target and reference signal
'''
# convert to int to avoid indexing issues
ROI = slice(int(roi[0]), int(roi[1]), 1)
# interpolate data onto a higher resolution grid
x,r1 = highres(reference[ROI],kind='linear',res=res)
x,r2 = highres(target[ROI],kind='linear',res=res)
# subtract mean
r1 -= r1.mean()
r2 -= r2.mean()
# compute cross covariance
cc = ccovf(r1,r2,demean=False,unbiased=False)
# determine if the shift is positive or negative
if np.argmax(cc) == 0:
cc = ccovf(r2,r1,demean=False,unbiased=False)
mod = -1
else:
mod = 1
# this method was often found to be more accurate than the FFT-based approach below
return np.argmax(cc)*mod*(1./res)
# interpolate data onto a higher resolution grid
x,r1 = highres(reference[ROI],kind='linear',res=res)
x,r2 = highres(target[ROI],kind='linear',res=res)
# subtract off mean
r1 -= r1.mean()
r2 -= r2.mean()
# compute the phase-only correlation function
product = np.fft.fft(r1) * np.fft.fft(r2).conj()
cc = np.fft.fftshift(np.fft.ifft(product))
# manipulate the output from np.fft
l = reference[ROI].shape[0]
shifts = np.linspace(-0.5*l,0.5*l,l*res)
# plt.plot(shifts,cc,'k-'); plt.show()
return shifts[np.argmax(cc.real)]
def highres(y,kind='cubic',res=100):
'''
Interpolate data onto a higher resolution grid by a factor of *res*
Args:
y (1d array/list): signal to be interpolated
kind (str): order of interpolation (see docs for scipy.interpolate.interp1d)
res (int): factor to increase resolution of data via linear interpolation
Returns:
xnew, ynew (1d arrays): upsampled grid and interpolated signal values
'''
y = np.array(y)
x = np.arange(0, y.shape[0])
f = interp1d(x, y, kind=kind)
xnew = np.linspace(0, x.shape[0]-1, x.shape[0]*res)
ynew = f(xnew)
return xnew,ynew
if __name__ == "__main__":
from scipy import signal
import matplotlib.pyplot as plt
NPTS = 100
SHIFTVAL = 4
NOISE = 1e-2 # can perturb offset retrieval from true
print('true signal offset:',SHIFTVAL)
# generate some noisy data and simulate a shift
y = signal.gaussian(NPTS, std=4) + np.random.normal(1,NOISE,NPTS)
shifted = shift( signal.gaussian(NPTS, std=4) ,SHIFTVAL) + np.random.normal(1,NOISE,NPTS)
# align the shifted spectrum back to the real
s = phase_align(y, shifted, [10,90])
print('phase shift value to align is',s)
# chi squared alignment at native resolution
s = chisqr_align(y, shifted, [10,90], init=-3.5,bound=2)
print('chi square alignment',s)
# make some diagnostic plots
plt.plot(y,label='original data')
plt.plot(shifted,label='shifted data')
plt.plot(shift(shifted,s,mode='nearest'),ls='--',label='aligned data')
plt.legend(loc='best')
plt.show()
```
#### File: SpecAnalysis/modules/PLSR.py
```python
from __future__ import print_function
import fns
import numpy as np
import os
import matplotlib.pyplot as plt
import matplotlib
import scipy.signal
from scipy import signal
#from sklearn.model_selection import LeavePOut
#from sklearn.model_selection import KFold
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import LeaveOneOut
from sklearn.linear_model import ElasticNet
import sklearn.metrics
import types
from math import sqrt
import copy
import sys
import importlib
from .libs import PLSRsave
from .libs import PLSRGeneticAlgorithm
from .libs import PLSRNN
from .libs import PLSRRNN
from .libs import PLSRCNN
from .libs import PLSR_file_import
from .libs import PLSRregressionMethods
from .libs import PLSRregressionVisualization
from .libs import PLSRpreprocessing
from .libs import PLSRwavelengthSelection
from .libs import PLSRsequential_feature_selectors
from .libs import PLSRclassifiers
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
#### this
'''functions_to_wrap = [[matplotlib.axes.Axes,'pcolormesh'],
[matplotlib.figure.Figure,'colorbar'],
[matplotlib.figure.Figure,'clf'],
[matplotlib.figure.Figure,'set_size_inches'],
[matplotlib.figure.Figure,'add_subplot'],
[matplotlib.figure.Figure,'subplots'],
[matplotlib.figure.Figure,'subplots_adjust'],
[matplotlib.axes.Axes,'invert_yaxis'],
[matplotlib.axes.Axes,'invert_xaxis'],
[matplotlib.axes.Axes,'set_title'],
[matplotlib.axes.Axes,'axis'],
[matplotlib.axes.Axes,'cla'],
[matplotlib.axes.Axes,'plot'],
[matplotlib.figure.Figure,'savefig'],
[matplotlib.axes.Axes,'set_xlim'],
[matplotlib.axes.Axes,'set_position'],
[matplotlib.axes.Axes,'bar'],
[matplotlib.figure.Figure,'add_axes'],
[plt,'figure'],
]
for function in functions_to_wrap:
if not 'function rimt.<locals>.rimt_this' in str(getattr(function[0], function[1])):
setattr(function[0], function[1], fns.rimt(getattr(function[0], function[1])))'''
#from multiprocessing import Pool
#import datetime
#matplotlib.rc('text', usetex=True)
#matplotlib.rc('text.latex', preamble=r'\usepackage{upgreek}')
def crossval(T,V,ui,case):
if not ui['is_validation']=='X-val on training':
case.supressplot=0
return [case]
else:
case.Xval_cases=[]
#XvalTs=[]
#XvalVs=[]
#supressplots=[]
if ui['cross_val_N']==1 and ui['cross_val_max_cases']==-1:
#ui['cross_val_max_cases']=len(T.Y)
splitodule=LeaveOneOut()
print('Using sklearn.LeaveOneOut on '+str(len(T.Y))+' measurements. Maxcases set to '+str(len(T.Y)))
else:
if ui['cross_val_max_cases']==-1:
print('cross_val_max_cases set to -1, cross_val_N not set to 1. Setting cross_val_max_cases to default (20)' )
ui['cross_val_max_cases']=20
splitodule=ShuffleSplit(n_splits=ui['cross_val_max_cases'], test_size=ui['cross_val_N'])
for train,val in splitodule.split(T.X):
case.Xval_cases.append(types.SimpleNamespace())
case.Xval_cases[-1].train=train
case.Xval_cases[-1].val=val
case.Xval_cases[-1].T=types.SimpleNamespace()
case.Xval_cases[-1].T.X=np.array(T.X[train])
case.Xval_cases[-1].T.Y=np.array(T.Y[train])
case.Xval_cases[-1].V=types.SimpleNamespace()
case.Xval_cases[-1].V.X=np.array(T.X[val])
case.Xval_cases[-1].V.Y=np.array(T.Y[val])
case.Xval_cases[-1].supressplot=1
case.Xval_cases[-1].supressplot=0
return case.Xval_cases
def run_reg_module(Xval_case,case,ui,common_variables,active_wavenumers,logfile,keywords={}):
T=Xval_case.T
V=Xval_case.V
supressplot=Xval_case.supressplot
wavenumbers=case.wavenumbers
folder=case.folder
try:
keywords=case.keywords
except:
keywords={}
print('let the developers know if you see this error')
# Set what datapoints to include, the parameter 'wavenum' is in units cm^-1
#datapointlists=ui.datapointlists
# common_variables.tempax and common_variables.tempfig are for the figure that is saved, common_variables.ax and common_variables.fig are for the figure that is displayed
# need to have this for the colorbar
if ui['save_check_var']:
common_variables.tempax.fig=common_variables.tempfig
#plot best result
# or only result if not MW
reg_module=PLSRregressionMethods.getRegModule(ui['reg_type'],keywords)
#reg_module.active_wavenumers=active_wavenumers
# get RMSe
for E in [T,V]:
if len(E.Y)>0:
E.Xsmol=E.X[:,active_wavenumers]
reg_module.fit(T.Xsmol, T.Y)
for E in [T,V]:
if len(E.Y)>0:
E.pred = reg_module.predict(E.Xsmol)[:,0]
else:
E.pred = []
Xval_case.RMSECP=np.sqrt((np.sum((T.pred-T.Y)**2)+np.sum((V.pred-V.Y)**2))/(len(T.Y)+len(V.Y)))
Xval_case.RMSEC=np.sqrt((np.sum((T.pred-T.Y)**2))/(len(T.Y)))
if len(V.Y)>0:
Xval_case.RMSEP=np.sqrt((np.sum((V.pred-V.Y)**2))/(len(V.Y)))
'''if ui['RMS_type']=='Combined RMSEP+RMSEC' and len(V.Y)>0:
RMSe=Xval_case.RMSECP
Y_for_r2=np.concatenate((T.Y,V.Y))
pred_for_r2=np.concatenate((T.pred,V.pred))
el'''
if ui['RMS_type']=='RMSEP':
RMSe=Xval_case.RMSEP
Y_for_r2=V.Y
pred_for_r2=V.pred
else:
RMSe=Xval_case.RMSEC
Y_for_r2=T.Y
pred_for_r2=T.pred
case.XvalRMSEs.append(RMSe)
#calculating coefficient of determination
if not hasattr(case,'X_val_pred'):
case.X_val_pred=[pred_for_r2]
case.X_val_Y=[Y_for_r2]
else:
case.X_val_pred.append(pred_for_r2)
case.X_val_Y.append(Y_for_r2)
if not supressplot: # if plotting this, calculate R^2 for all xval cases
X_pred=np.array(case.X_val_pred).reshape(-1)
X_Y=np.array(case.X_val_Y).reshape(-1)
y_mean = np.sum(X_Y)*(1/len(X_Y))
Xval_case.R_squared = 1 - ((np.sum((X_Y - X_pred)**2))/(np.sum((X_Y - y_mean)**2)))
avg=np.average(X_pred-X_Y)
n=len(X_pred)
Xval_case.SEP=np.sqrt(np.sum( ( X_pred-X_Y-avg )**2 )/(n-1))
Xval_case.mean_absolute_error=sklearn.metrics.mean_absolute_error(X_Y,X_pred)
Xval_case.mean_absolute_error_percent=100/len(X_Y) * np.sum(np.abs(X_Y-X_pred)/X_Y)
else:
Xval_case.R_squared=0
Xval_case.SEP=0
try:
Xval_case.R_not_squared=sqrt(Xval_case.R_squared)
except:
Xval_case.R_not_squared=0
if ui['coeff_det_type']=='R^2':
coeff_det = Xval_case.R_squared
elif ui['coeff_det_type']=='R':
coeff_det = Xval_case.R_not_squared
if reg_module.type=='classifier':#'classifier_type' in keywords:
frac_cor_lab=PLSRclassifiers.get_correct_categorized(case.X_val_Y[-1],case.X_val_pred[-1])
case.XvalCorrClass.append(frac_cor_lab)
else:
frac_cor_lab=-1
#plot
if not supressplot:
if not ui['do_not_save_plots']:
PLSRsave.plot_regression(Xval_case,case,ui,fns.add_axis(common_variables.fig,ui['fig_per_row'],ui['max_plots']),keywords,RMSe, coeff_det,frac_cor_lab=frac_cor_lab)
if ui['save_check_var']:
if not ui['do_not_save_plots']:
PLSRsave.plot_regression(Xval_case,case,ui,common_variables.tempax,keywords,RMSe, coeff_det,frac_cor_lab=frac_cor_lab)
common_variables.tempfig.subplots_adjust(bottom=0.13,left=0.15, right=0.97, top=0.95)
#common_variables.tempfig.savefig(folder+'Best'+'Comp'+str(components)+'Width'+str(round(Wwidth,1))+'Center'+str(round(Wcenter,1))+'.pdf')
#common_variables.tempfig.savefig(folder+'Best'+'Comp'+str(components)+'Width'+str(round(Wwidth,1))+'Center'+str(round(Wcenter,1))+'.svg')
plotFileName=case.folder+ui['reg_type']+PLSRsave.get_unique_keywords_formatted(common_variables.keyword_lists,case.keywords).replace('.','p')
common_variables.tempfig.savefig(plotFileName+ui['file_extension'])
PLSRsave.add_line_to_logfile(logfile,Xval_case,case,ui,keywords,RMSe,coeff_det,frac_cor_lab=frac_cor_lab)
#draw(common_variables)
return reg_module, RMSe
class moduleClass():
filetypes=['DPT','dpt','list','txt','laser']
def __init__(self, fig, locations, frame, ui):
#reload modules
if frame.module_reload_var.get():
if 'modules.libs.PLSRsave' in sys.modules: #reload each time it is run
importlib.reload(sys.modules['modules.libs.PLSRsave'])
if 'modules.libs.PLSRGeneticAlgorithm' in sys.modules: #reload each time it is run
importlib.reload(sys.modules['modules.libs.PLSRGeneticAlgorithm'])
if 'modules.libs.PLSRsequential_feature_selectors' in sys.modules: #reload each time it is run
importlib.reload(sys.modules['modules.libs.PLSRsequential_feature_selectors'])
if 'modules.libs.PLSRNN' in sys.modules: #reload each time it is run
importlib.reload(sys.modules['modules.libs.PLSRNN'])
if 'modules.libs.PLSRRNN' in sys.modules: #reload each time it is run
importlib.reload(sys.modules['modules.libs.PLSRRNN'])
if 'modules.libs.PLSRCNN' in sys.modules: #reload each time it is run
importlib.reload(sys.modules['modules.libs.PLSRCNN'])
if 'modules.libs.PLSR_file_import' in sys.modules: #reload each time it is run
importlib.reload(sys.modules['modules.libs.PLSR_file_import'])
if 'modules.libs.PLSRregressionMethods' in sys.modules: #reload each time it is run
importlib.reload(sys.modules['modules.libs.PLSRregressionMethods'])
if 'modules.libs.PLSRclassifiers' in sys.modules: #reload each time it is run
importlib.reload(sys.modules['modules.libs.PLSRclassifiers'])
if 'modules.libs.PLSRregressionVisualization' in sys.modules: #reload each time it is run
importlib.reload(sys.modules['modules.libs.PLSRregressionVisualization'])
if 'modules.libs.PLSRpreprocessing' in sys.modules: #reload each time it is run
importlib.reload(sys.modules['modules.libs.PLSRpreprocessing'])
if 'modules.libs.PLSRwavelengthSelection' in sys.modules: #reload each time it is run
importlib.reload(sys.modules['modules.libs.PLSRwavelengthSelection'])
#code for checking for memory leaks
global run #global keyword used to connect button clicks to class object
run=self
self.fig=fig
self.locations=locations
self.frame=frame
self.ui=ui
def clear_memory(self):
safe_keys=['fig','locations','frame','ui','wrapper_i','wrapper_max']
keys=[]
for key in self.__dict__:
keys.append(key)
for key in keys:
if not key in safe_keys:
delattr(self,key)
def run(self):
if not self.ui['use_wrapper']:
self.run_wrapper_case()
else:
import gc
gc.collect() #collect garbage to free memory from last run
self.wrapper_i=1
self.wrapper_max=len(self.ui['binning'])
if self.ui['filter']=='Try all': self.wrapper_max*=6
if self.ui['try_all_scatter_correction']: self.wrapper_max*=4
if self.ui['try_all_normalize']: self.wrapper_max*=4
if self.ui['scaling']=='Try all': self.wrapper_max*=2
if self.ui['mean_centering']=='Try all': self.wrapper_max*=2
bins=self.ui['binning']
for bin in bins:
self.ui['binning']=[bin]
self.scatter_cor_wrapper()
self.ui['binning']=bins
def scatter_cor_wrapper(self):
#{'key': 'filter', 'type': 'radio:text', 'texts': ['No filter', 'MA', 'Butterworth', 'Hamming','Fourier','Try all'], 'tab': 0, 'row': 7} ,
if self.ui['filter']=='Try all':
self.ui['use_SG']='No SG'
for f in ['No filter', 'MA', 'Butterworth', 'Hamming','Fourier','SG']:
#print(self.__dict__)
self.ui['filter']=f
if self.ui['filter']=='SG':
self.ui['filter']='No filter'
self.ui['use_SG']='use SG'
if self.ui['try_all_scatter_correction']:
self.ui['try_all_scatter_correction']=0
self.ui['normalize']=0
self.ui['SNV_key']=0
self.ui['MSC_key']=0
self.normalize_wrapper()
self.ui['normalize']=1
self.normalize_wrapper()
self.ui['normalize']=0
self.ui['SNV_key']=1
self.normalize_wrapper()
self.ui['SNV_key']=0
self.ui['MSC_key']=1
self.normalize_wrapper()
self.ui['MSC_key']=0
self.ui['try_all_scatter_correction']=1
else:
self.normalize_wrapper()
self.ui['use_SG']='No SG'
self.ui['filter']='Try all'
else:
if self.ui['try_all_scatter_correction']:
self.ui['try_all_scatter_correction']=0
self.ui['normalize']=0
self.ui['SNV_key']=0
self.ui['MSC_key']=0
self.normalize_wrapper()
self.ui['normalize']=1
self.normalize_wrapper()
self.ui['normalize']=0
self.ui['SNV_key']=1
self.normalize_wrapper()
self.ui['SNV_key']=0
self.ui['MSC_key']=1
self.normalize_wrapper()
self.ui['MSC_key']=0
self.ui['try_all_scatter_correction']=1
else:
self.normalize_wrapper()
def normalize_wrapper(self):
ui=self.ui
if not ui['try_all_normalize']:
self.scaling_wrapper()
else:
ui['try_all_normalize']=0
#ui['normalize']=0
ui['baseline_value']=0
ui['baseline_linear']=0
ui['baseline_background']=0
ui['derivative']='Not der'
#
self.scaling_wrapper()
#
#ui['normalize']=1
#self.scaling_wrapper()
#ui['normalize']=0
#
ui['baseline_value']=1
self.scaling_wrapper()
ui['baseline_value']=0
#
ui['baseline_linear']=1
self.scaling_wrapper()
ui['baseline_linear']=0
#
ui['baseline_background']=1
self.scaling_wrapper()
ui['baseline_background']=0
#
ui['derivative']='1st der'
self.scaling_wrapper()
ui['derivative']='2nd der'
self.scaling_wrapper()
ui['derivative']='Not der'
ui['try_all_normalize']=1
return
#{'key': 'scaling', 'type': 'radio:text', 'texts': ['No scaling', 'Scaling','Try all'], 'tab': 0, 'row': 2}
def scaling_wrapper(self):
if not self.ui['scaling']=='Try all':
self.mean_centering_wrapper()
else:
self.ui['scaling']='No scaling'
self.mean_centering_wrapper()
self.ui['scaling']='Scaling'
self.mean_centering_wrapper()
self.ui['scaling']='Try all'
#{'key': 'mean_centering', 'type': 'radio:text', 'texts': ['No mean centering', 'Mean centering','Try all'], 'tab': 0, 'row': 2} ,
def mean_centering_wrapper(self):
if not self.ui['mean_centering']=='Try all':
self.clear_memory()
print('wrapper i = ',self.wrapper_i, ' of ', self.wrapper_max)
self.wrapper_i+=1
self.run_wrapper_case()
else:
self.ui['mean_centering']='No mean centering'
self.clear_memory()
print('wrapper i = ',self.wrapper_i, ' of ', self.wrapper_max)
self.wrapper_i+=1
self.run_wrapper_case()
self.ui['mean_centering']='Mean centering'
self.clear_memory()
print('wrapper i = ',self.wrapper_i, ' of ', self.wrapper_max)
self.wrapper_i+=1
self.run_wrapper_case()
self.ui['mean_centering']='Try all'
def run_wrapper_case(self):
fig=self.fig
locations=self.locations
frame=self.frame
ui=self.ui
eprint('running')
self.fig=fig
fig.clf()
self.frame=frame
# get variables from buttons
common_variables=types.SimpleNamespace()
common_variables.draw=self.draw
self.common_variables=common_variables
common_variables.keyword_lists={}
PLSRregressionMethods.get_relevant_keywords(common_variables,ui)
ui['multiprocessing']=1-(ui['no_multiprocessing'])
save_check_var=frame.save_check_var.get()
ui['save_check_var']=save_check_var
filename=frame.name_field_string.get()
self.filename=filename
#prepare figures for display (set correct number of axes, each pointing to the next axis)
######################### if crossval and moving window -> stop ###########
if ui['is_validation']=='X-val on training' and ui['regression_wavelength_selection']=='Moving window':
print("Use of x-validation with moving window is not supported")
return
######################### if RMSEP and no validation -> stop ##############
if ui['is_validation']=='Training' and ui['RMS_type']=='RMSEP':
print("Unable to calculate RMSEP with only training set")
return
#################### if RMSEP and RMSEC and no validation -> only RMSEP ###
if ui['is_validation']=='Training':
ui['RMS_type']='RMSEC'
if ui['RMS_type']=='Default':
ui['RMS_type']='RMSEC'
else:
if ui['RMS_type']=='Default':
ui['RMS_type']='RMSEP'
common_variables.frame=frame
common_variables.fig=fig
################################################################################################
######################### Load data as training or validation ##################################
################################################################################################
T=types.SimpleNamespace()
V=types.SimpleNamespace()
if len(frame.training_files)==0:
print('training set required')
return
#load training set
T.X, T.Y, common_variables.trainingfiles, self.wavenumbers, self.regressionCurControlTypes=PLSR_file_import.get_files(frame.training_files,ui['max_range'])
self.original_wavenumbers=self.wavenumbers
for i, contrltytpe in enumerate(self.regressionCurControlTypes):
frame.button_handles['cur_col'][i]["text"]=contrltytpe
if ui['is_validation']=='Training' or ui['is_validation']=='X-val on training':# if training or crossval -> deselect validation
frame.nav.deselect()
#frame.nav.clear_color('color3')
#frame.validation_files=frame.nav.get_paths_of_selected_items()
V.X=np.array([]) # set empty validation set
V.Y=np.array([])
elif ui['is_validation']=='Training and Validation':
if len(frame.validation_files)==0:
print('training and validation set selected, but no validation set in input')
return
#load validation set
V.X, V.Y, common_variables.validationfiles, _, _2=PLSR_file_import.get_files(frame.validation_files,ui['max_range'])
common_variables.original_T=copy.deepcopy(T)
common_variables.original_V=copy.deepcopy(V)
################################################################################################
################################## load reference spectra #######################################
################################################################################################
if ui['reference_spectra']=='':
self.reference_spectra=None
else:
try:
temp, _1, _2, _3, _4=PLSR_file_import.get_files([ui['reference_spectra']],np.inf)
if len(temp)>0:
print('first spectrum in the list selected as the reference spectrum')
self.reference_spectra=np.array(temp[0])
except Exception as e:
self.reference_spectra=None
print(e)
print('error importing reference spectra -> ignoring')
if ui['background_spectra']=='':
self.background_spectra=None
else:
try:
temp, _1, _2, _3, _4=PLSR_file_import.get_files([ui['background_spectra']],np.inf)
if len(temp)>0:
print('first spectrum in the list selected as the background spectrum')
self.background_spectra=np.array(temp[0])
except Exception as e:
self.background_spectra=None
print(e)
print('error importing background spectra -> ignoring')
################################################################################################
################# set up folder, save log and temporary figure for saving ######################
################################################################################################
if save_check_var:
if not os.path.exists(filename):
os.makedirs(filename)
PLSRsave.SaveLogFile(filename,ui,common_variables)
common_variables.tempfig,common_variables.tempax=PLSRsave.make_tempfig(ui,frame)
################################################################################################
############################## calculate window ranges #########################################
################################################################################################
common_variables.datapoints=np.arange(len(self.wavenumbers))
#common_variables.datapointlists=[common_variables.datapoints]# declare this for get_or_make_absorbance_ax
#common_variables.datapoints, common_variables.datapointlists=PLSRpreprocessing.GetDatapoints(self.wavenumbers, ui)
################################################################################################
################################### save unprocessed spectra ###################################
################################################################################################
if ui['plot_spectra_before_preprocessing']:
eprint('plot abs')
if ui['save_check_var']:
PLSRsave.PlotAbsorbance(common_variables.tempax,common_variables.tempfig,common_variables.datapoints,ui,self.wavenumbers,T.X,V.X)
plotFileName=filename+'/SpectraPrePreprocessing'
common_variables.tempfig.savefig(plotFileName.replace('.','p')+ui['file_extension'])
common_variables.tempax.cla()
ax=PLSRsave.get_or_make_absorbance_ax(self)
self.draw()
################################################################################################
################################### make pychem input file #####################################
################################################################################################
if int(ui['make_pyChem_input_file']):
if ui['is_validation']=='Training and Validation':
PLSRsave.writePyChemFile(T.X,T.Y,V.X,V.Y)
else:
PLSRsave.writePyChemFile(T.X,T.Y,[],[])
################################################################################################
################## set current control and remove data higher than maxrange ####################
################################################################################################
datasets=[T]
if ui['is_validation']=='Training and Validation':
datasets.append(V)
for E in datasets:
keepsamples=[]
for i,_ in enumerate(E.Y):
if not E.Y[i,ui['cur_col']] > ui['max_range']:
keepsamples.append(i)
E.X=E.X[keepsamples,:]
E.Y=E.Y[keepsamples,ui['cur_col']]
ui['cur_control_string']=self.regressionCurControlTypes[ui['cur_col']]
PLSRpreprocessing.do_preprocessing(self,T,V)
if ui['plot_fourier']:
if hasattr(T,'X_fft'):
ax=fns.add_axis(fig,ui['fig_per_row'],ui['max_plots'])
PLSRsave.plot_fourier(ax,fig,T,V,ui)
self.complete_cases=[]
for _ in [1]: # is a loop so that you can use 'break'
for i,dercase in enumerate(self.preprocessed_cases):
#need to set data range in case of derivative; rerun in all cases anyway
datapoints=PLSRpreprocessing.GetDatapoints(dercase.wavenumbers, ui)
#common_variables.datapoints=datapoints
#common_variables.datapointlists=datapointlists
if ui['plot_spectra_after_preprocessing']:
ax=fns.add_axis(fig,ui['fig_per_row'],ui['max_plots'])
PLSRsave.PlotAbsorbance(ax,fig,datapoints,ui,dercase.wavenumbers,dercase.T.X,dercase.V.X,dercase=dercase)
self.draw()
if ui['save_check_var']:
PLSRsave.PlotAbsorbance(common_variables.tempax,common_variables.tempfig,datapoints,ui,dercase.wavenumbers,dercase.T.X,dercase.V.X,dercase=dercase)
plotFileName=dercase.folder+'/SpectraPostPreprocessing'
common_variables.tempfig.savefig(plotFileName.replace('.','p')+ui['file_extension'])
common_variables.tempax.cla()
for E in [dercase.T,dercase.V]:
if len(E.Y)>0:
E.X=E.X[:,datapoints]
dercase.wavenumbers=dercase.wavenumbers[datapoints]
#create complete cases for all pemutations of keyword values in keyword_lists
for keyword_case in PLSRregressionMethods.generate_keyword_cases(common_variables.keyword_lists):
self.complete_cases.append(types.SimpleNamespace())
self.complete_cases[-1].wavenumbers=dercase.wavenumbers
self.complete_cases[-1].folder=dercase.folder
self.complete_cases[-1].sg_config=dercase.sg_config
self.complete_cases[-1].derrivative=dercase.derrivative
self.complete_cases[-1].T=dercase.T
self.complete_cases[-1].V=dercase.V
self.complete_cases[-1].preprocessing_done=dercase.preprocessing_done
self.complete_cases[-1].keywords=keyword_case
if ui['reg_type']=='None':
break
for case in self.complete_cases:
case.XvalRMSEs=[]
case.XvalCorrClass=[]
common_variables.keywords=case.keywords
#GeneticAlgorithm(ui,T,V,datapoints,components)
if ui['regression_wavelength_selection']=='No wavelength selection':
active_wavenumers = np.ones(len(case.wavenumbers), dtype=bool)
else:
# report to user regarding split module
if self.ui['WS_loss_type']=='X-validation on training':
if self.ui['WS_cross_val_N']==1 and self.ui['WS_cross_val_max_cases']==-1:
print('Using sklearn.LeaveOneOut on '+str(len(case.T.Y))+' measurements. Maxcases set to '+str(len(case.T.Y)))
else:
if self.ui['WS_cross_val_max_cases']==-1:
print('WS_cross_val_max_cases set to -1, WS_cross_val_N not set to 1. Setting WS_cross_val_max_cases to default (20)' )
self.ui['WS_cross_val_max_cases']=20
if ui['regression_wavelength_selection']=='Genetic Algorithm':
GAobject = PLSRGeneticAlgorithm.GeneticAlgorithm(common_variables,ui,case)
active_wavenumers = GAobject.run(fns.add_axis(common_variables.fig,ui['fig_per_row'],ui['max_plots']),case.wavenumbers,case.folder,self.draw)
elif ui['regression_wavelength_selection']=='Moving Window':
active_wavenumers = PLSRwavelengthSelection.MW(case,ui,common_variables)
elif ui['regression_wavelength_selection']=='Sequential Feature Selector':
FSobject = PLSRsequential_feature_selectors.sequentialFeatureSelector(common_variables,ui,case,self.draw)
active_wavenumers = FSobject.run()
Xval_cases=crossval(case.T,case.V,ui,case) # returns [T],[V] if not crossva, otherwise makes cases from validation dataset
for Xval_case in Xval_cases:
# ui.datapoints=runGeneticAlgorithm(dercase[0],dercase[1],dercase[2],dercase[3],dercase[4],dercase[5],dercase[6],dercase[7])
#def MW(T,V,wavenumbers, folder,ui,sg_config,curDerivative,supressplot):
if ui['save_check_var'] and not ui['do_not_save_plots']:
active_wavenumbers_file=case.folder+ui['reg_type']+PLSRsave.get_unique_keywords_formatted(common_variables.keyword_lists,case.keywords).replace('.','p')+'active_wavenumers.dpb'
PLSRsave.save_active_wavenumbers(active_wavenumbers_file,case.wavenumbers,active_wavenumers)
case.active_wavenumers=active_wavenumers
self.draw()
self.last_reg_module, RMSe = run_reg_module(Xval_case,case,ui,common_variables,active_wavenumers,self.filename+'/results_table',keywords={})
self.draw()
self.last_complete_case = case
self.last_Xval_case = Xval_case
if Xval_case.supressplot==0:
if ui['is_validation']=='X-val on training':
#if ui['RMS_type']=='Combined RMSEP+RMSEC':
# print('RMSEC+RMSEP = '+PLSRsave.custom_round(case.xvalRMSE,3)+' '+ui['unit'])
if not 'classifier_type' in case.keywords:
case.xvalRMSE=np.sqrt(np.sum(np.array(case.XvalRMSEs)**2)/len(case.XvalRMSEs))
if ui['RMS_type']=='RMSEC':
print('RMSEC = '+PLSRsave.custom_round(case.xvalRMSE,3)+' '+ui['unit'])
elif ui['RMS_type']=='RMSEP':
print('RMSEP = '+PLSRsave.custom_round(case.xvalRMSE,3)+' '+ui['unit'])
else:
print(case.XvalCorrClass)
case.xvalCorrClas=np.average(case.XvalCorrClass)
print(case.xvalCorrClas)
if ui['RMS_type']=='RMSEC':
print('x-val corr classifed training = '+str(round(case.xvalCorrClas*100,3))+' %')
elif ui['RMS_type']=='RMSEP':
print('x-val corr classifed prediction = '+str(round(case.xvalCorrClas*100,3))+' %')
case.XvalRMSEs=[]
eprint('done')
#plt.close(common_variables.tempfig)
#del common_variables.tempfig
if save_check_var:
# save plot in window
fig.savefig(filename+'/'+'_'.join(filename.split('/')[1:])+ui['file_extension'])
print('Done')
return
def callbackClick(self,frame,event):
ax=event.inaxes
if hasattr(ax,'plot_type'):
if ax.plot_type=='NN node map':
PLSRregressionVisualization.plot_node_activation_vector(event)
return
else:
print("clicked at", event.xdata, event.ydata)
def reorder_plots(self,event):
ui=self.ui
ui['fig_per_row']=int(self.frame.buttons['fig_per_row'].get())
ui['max_plots']=int(self.frame.buttons['max_plots'].get())
fns.move_all_plots(self.fig,ui['fig_per_row'],ui['max_plots'])
self.draw()
@fns.rimt
def draw(self):
self.fig.canvas.draw()
self.frame.update()
def addButtons():
buttons=[
{'key': 'RNNtab3name', 'type': 'tabname', 'text': 'Import Options', 'tab': 3} ,
# dataset configuration
{'key': 'RegressionL0', 'type': 'label', 'text': 'Data import options: ', 'tab': 3, 'row': 0} ,
{'key': 'is_validation', 'type': 'radio:text', 'texts': ['Training', 'Training and Validation', 'X-val on training'], 'tab': 3, 'row': 0} ,
{'key': 'cross_val_N', 'type': 'txt:int', 'text': 'Number of validation samples for cross validation', 'default': '10', 'width': 4, 'tab': 3, 'row': 1} ,
{'key': 'cross_val_max_cases', 'type': 'txt:int', 'text': 'Iterations', 'default': '-1', 'width': 4, 'tab': 3, 'row': 1} ,
{'key': 'RegressionL0a', 'type': 'label', 'text': 'Column of data to use: ', 'tab': 3, 'row': 2} ,
{'key': 'cur_col', 'type': 'radio', 'texts': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10'], 'tab': 3, 'row': 2} ,
{'key': 'max_range', 'type': 'txt:float', 'text': 'Maximum concentration for training set', 'default': '10000', 'width': 6, 'tab': 3, 'row': 3} ,
{'key': 'unit', 'type': 'txt', 'text': 'Concentration unit', 'default': 'mg/dl', 'width': 6, 'tab': 3, 'row': 4} ,
# config for creating figure and saving
{'key': 'file_extension', 'type': 'radio:text', 'texts': [ '.svg', '.png', '.pdf'], 'tab': 4, 'row': 1} ,
{'key': 'reorder_plots', 'type': 'click', 'text': 'Reorder plots', 'bind': reorder_plots, 'tab': 4, 'row': 1} ,
{'key': 'fig_per_row', 'type': 'txt:int', 'text': 'Figures per row', 'default': '2', 'width': 4, 'tab': 4, 'row': 1} ,
{'key': 'DPI', 'type': 'txt:int', 'text': 'dpi', 'default': '80', 'width': 4, 'tab': 4, 'row': 1} ,
# graphical user interface options
{'key': 'max_plots', 'type': 'txt:int', 'text': 'Max number of plots', 'default': '-1', 'width': 3, 'tab': 4, 'row': 2} ,
# save options
{'key': 'make_pyChem_input_file', 'type': 'check', 'text': 'Make pyChem file', 'tab': 4, 'row': 9} ,
{'key': 'do_not_save_plots', 'type': 'check', 'text': 'do not save plots', 'tab': 4, 'row': 8} ,
{'key': 'use_wrapper', 'type': 'check', 'text': 'use wrapper', 'tab': 4, 'row': 8} ,
# debugging options
{'key': 'RNNtab5name', 'type': 'tabname', 'text': 'Other', 'tab': 5} ,
{'key': 'no_multiprocessing', 'type': 'radio', 'texts': ['use multiprocessing', 'do not use multiprocessing'], 'tab': 5, 'row': 0},
# result
{'key': 'RMS_type', 'type': 'radio:text', 'texts': ['Default', 'RMSEC', 'RMSEP'], 'tab': 3, 'row': 6} ,
{'key': 'coeff_det_type', 'type': 'radio:text', 'texts': ['R^2', 'R'], 'tab': 3, 'row': 7} ,
{'key': 'SEP_MAE_or_%MAE', 'type': 'radio:text', 'texts': ['SEP', 'MAE','%MAE'], 'tab': 3, 'row': 8} ,
# declare input
{'key': 'set_training', 'type': 'click', 'text': 'Set Training', 'bind': set_training,'color':'color1', 'tab': 10, 'row': 0} ,
{'key': 'set_validation', 'type': 'click', 'text': 'Set Validation', 'bind': set_validation,'color':'color3', 'tab': 10, 'row': 0} ,
]
buttons+=PLSRregressionMethods.get_buttons()
buttons+=PLSRclassifiers.get_buttons()
buttons+=PLSRsave.get_buttons()
buttons+=PLSRwavelengthSelection.get_buttons()
buttons+=PLSRpreprocessing.get_buttons()
return buttons
def set_training(event):
"""Sets the training data set(s) in the GUI."""
frame=event.widget.master.master.master
frame.nav.clear_color('color1')
frame.nav.color_selected('color1')
frame.training_files=frame.nav.get_paths_of_selected_items()
frame.nav.deselect()
return
def set_validation(event):
"""Sets the validation data set(s) in the GUI."""
frame=event.widget.master.master.master
frame.nav.clear_color('color3')
frame.nav.color_selected('color3')
frame.validation_files=frame.nav.get_paths_of_selected_items()
frame.nav.deselect()
return
def reorder_plots(event):
global run
run.reorder_plots(run,event)
return
```
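For reference, the summary statistics computed in run_reg_module above boil down to a few lines of numpy. This sketch reproduces those formulas (RMSE as the root-mean-square residual, SEP as the bias-corrected standard error, R² against the mean of the reference values) on toy numbers and is illustrative only:
```python
import numpy as np

def regression_metrics(y_true, y_pred):
    """RMSE, R^2, SEP and MAE as defined in run_reg_module (illustrative re-implementation)."""
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    resid = y_pred - y_true
    rmse = np.sqrt(np.sum(resid**2) / len(y_true))
    r2 = 1.0 - np.sum(resid**2) / np.sum((y_true - y_true.mean())**2)
    bias = np.mean(resid)
    sep = np.sqrt(np.sum((resid - bias)**2) / (len(y_true) - 1))
    mae = np.mean(np.abs(resid))
    return rmse, r2, sep, mae

print(regression_metrics([1.0, 2.0, 3.0, 4.0], [1.1, 1.9, 3.2, 3.8]))
```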
#### File: SpecAnalysis/modules/scansAveraging_absorbance.py
```python
import numpy as np
import fns
import copy
from .libs import signal_alignment
from .libs import signal_alignment
#function for averaging over n pulses
def averageN(y,n=5):
nh=int(n/2)
rest=n-2*nh
y2=np.array(y.copy())
l=len(y)
y2[int(nh):l-1-nh]=0.0
for i in range(-nh,nh+rest,1):
#print(int(nh)+i,l-1-nh+i,l)
y2[int(nh):l-1-nh]+=y[int(nh)+i:l-1-nh+i]/float(n)
return y2
def averageM(y,m=5):
for j in range(y.shape[1]):
y2=np.concatenate([copy.copy(y[:,j]),copy.copy(y[:,j])])
for i in range(y.shape[0]):
y[i,j]=np.average(y2[i:i+m])
return y
class moduleClass:
filetypes = ['bin']
def __init__(self, fig, locations, frame, ui):
self.ui=ui
self.fig=fig
self.scans=[]
self.frame=frame
for fname in locations:
data=np.fromfile(fname,dtype=np.int16)
rate=80000000 #only valid for 100kHz repetition rate
####################### get periodicity in data with FFT
fftfreq=np.fft.fftfreq(24000)[1:]
fft=np.fft.fft(data[0:24000])[1:]/fftfreq # divide by fftfreq
period=int(1/fftfreq[np.argmax(abs(fft))])
period=800
#print(period)
####################### Get first pulse
somezeroes = np.zeros(100, dtype=np.int16)
data = np.concatenate((somezeroes, data)) ##adding some zeroes to the beginning of data
#bin1 = np.concatenate((somezeroes, bin1)) ##adding some zeroes to the beginning of data
#bin2 = np.concatenate((somezeroes, bin2)) ##adding some zeroes to the beginning of data
maxstart=max(data[0:2000])
nextpulse=np.argmax(data[0:2000]-np.arange(0,maxstart,maxstart/(2000.0-0.5)))
pulsewindow=50
correctPlace=data[nextpulse-pulsewindow:nextpulse+pulsewindow]
#nextpulse+=np.argmax(correctPlace)-40
corvector=np.arange(-pulsewindow,pulsewindow,1)
nextpulse+=int(np.sum(correctPlace*corvector)/np.sum(correctPlace))
####################### get max value of each pulse
numpulses=int(len(data)/period)+100 #make array extra long, make sure it is long enough
pulseIntensity=np.zeros(numpulses) #np.zeros(len(data)/period)
dichal1=np.zeros(numpulses) #np.zeros(len(data)/period)
i=0
while nextpulse+20 < len(data) and nextpulse < 80000000:
#print(nextpulse,i)
if i%500==0:
# every 500 pulses: refine pulse position
#plt.plot(data[nextpulse-20:nextpulse+20])
correctPlace=data[nextpulse-pulsewindow:nextpulse+pulsewindow]
#nextpulse+=np.argmax(correctPlace)-80
#print(np.sum(correctPlace))
if np.sum(correctPlace) >1000:
nextpulse+=int(np.sum(correctPlace*corvector)/np.sum(correctPlace))
#pulseIntensity[i]=np.max(data[nextpulse-40:nextpulse+40])
pulseIntensity[i]=np.sum(data[nextpulse-pulsewindow:nextpulse+pulsewindow]) # integrate the pulse
#dichal1[i]=bin1[nextpulse]
nextpulse+=period
i+=1
####################### cut off excess length of pulseIntensity
i=-1
while pulseIntensity[i]==0:
i-=1
numpulses=numpulses+i
pulseIntensity=pulseIntensity[0:i+1]
self.scans.append(pulseIntensity)
#self.scans=np.array(self.scans)
return
def run(self):
self.fig.clf()
ax=fns.add_axis(self.fig,1)
StartWL=1200
EndWL=925
minscanlength=np.inf
for scan in self.scans:
if len(scan)<minscanlength:
minscanlength=len(scan)
for i,scan in enumerate(self.scans):
self.scans[i]=scan[0:minscanlength]
n=255
self.scans[i]=averageN(self.scans[i],n)
self.scans=np.array(self.scans)
self.averagescans=np.average(self.scans,axis=0)
for i,scan in enumerate(self.scans):
self.scans[i]=np.log10(scan/self.averagescans)
for i, scan in enumerate(self.scans):
if i > 0:
s = signal_alignment.chisqr_align(self.scans[0], scan, [0,20000], init=0, bound=50)
print(s)
self.scans[i] = signal_alignment.shift(scan, s, mode='nearest')
#StartWL=1200
#EndWL=925
#self.wavenumbers=StartWL+(EndWL-StartWL)*np.arange(minscanlength)/minscanlength
StartWL=1200
EndWL=925
self.wavenumbers=StartWL+(EndWL-StartWL)*np.arange(minscanlength)/minscanlength
numPulses=1000
step=100
self.ms=[10]
self.averaged_scans=[]
for i,m in enumerate(self.ms):
self.averaged_scans.append(copy.deepcopy(self.scans))
self.averaged_scans[-1]=averageM(self.averaged_scans[-1],m)
self.plot_me(ax,step,numPulses,EndWL,StartWL)
if self.frame.save_check_var.get():
tempfig = self.frame.hidden_figure
tempfig.set_size_inches(4*1.2, 3*1.2)
tempfig.set_dpi(300)
tempfig.clf()
tempax = tempfig.add_subplot(1, 1, 1)
tempfig.subplots_adjust(bottom=0.17,left=0.16, right=0.97, top=0.97)
self.plot_me(tempax,step,numPulses,EndWL,StartWL)
filename=self.frame.name_field_string.get()
tempfig.savefig(filename+'.png')
tempfig.savefig(filename+'.svg')
return
def plot_me(self,ax,step,numPulses,EndWL,StartWL):
figure=ax.figure
#[x,y,width,height]
pos = [0.3, 0.25, 0.3, 0.2]
newax = figure.add_axes(pos)
for i,m in enumerate(self.ms):
dat=self.averaged_scans[i][:,step//2:step*numPulses+step//2:step].swapaxes(0,1)
dat=np.std(dat,axis=1)
#ax.semilogy(self.wavenumbers[step//2:step*numPulses+step//2:step],
# dat*100)
xax=self.wavenumbers[step//2:step*numPulses+step//2:step]
if m==1:
label='1 scan'
else:
label=str(m)+' scans'
'''ax.fill_between(xax[1:-1],
-dat[1:-1]*100,
dat[1:-1]*100,
label=label)'''
ax.plot(xax[1:-1],
dat[1:-1],
label=label)
newax.loglog([m],[np.average(dat[1:-27000//100])],'x') #1200-1000
ax.legend(loc=2)
#ax.text(i+0.5,1.075,str(m))
#ax.set_xticks(np.arange(len(self.ms))+0.5)
#ax.set_xticklabels(self.ms)
ax.invert_xaxis()
#ax.set_xlabel(r'Wavenumbers [cm$^-1$]')
ax.set_ylabel(r'Deviation from mean intensity [%]')
ax.set_xlabel(r'Wavenumber [cm-1]')
#ax.set_ylim([-1,1])
return
def addButtons():
buttons = [
#{'key': 'joinPNG_orientation', 'type': 'radio:text', 'texts': ['horizontal', 'vertical'], 'tab': 0, 'row': 0} ,
]
return buttons
```
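The `averageN` helper above is a centered boxcar (moving-average) filter over `n` consecutive pulses, and `averageM` applies the same smoothing along the pulse axis of a 2-D scan array. Below is a minimal standalone sketch of the same idea using `np.convolve` on synthetic data; the trace, noise level, and window size are illustrative assumptions, not values taken from the module.
```python
import numpy as np

# Toy pulse-intensity trace: constant level plus shot-to-shot noise.
rng = np.random.default_rng(0)
trace = 100.0 + rng.normal(0.0, 5.0, 50_000)

# Centered boxcar average over n pulses (same idea as averageN above,
# written with np.convolve for brevity).
n = 255
smoothed = np.convolve(trace, np.ones(n) / n, mode="same")

# Averaging n uncorrelated pulses should shrink the noise by ~sqrt(n).
print("raw std:     ", trace.std())
print("smoothed std:", smoothed[n:-n].std())  # drop edge effects
print("expected:    ", trace.std() / np.sqrt(n))
```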
|
{
"source": "Jernic-Technologies/Soka",
"score": 3
}
|
#### File: Jernic-Technologies/Soka/Soka-ES.py
```python
import os
import tkinter as tk
from tkinter import filedialog
# Base code
root = tk.Tk()
apps = []
# Icon
root.iconbitmap("favicon.ico")
root.title('Soka')
# Commands
def add_app():
for widget in frame.winfo_children():
widget.destroy()
filename = filedialog.askopenfilename(initialdir="/", title="Seleccionar Archivo",
filetypes=(("Ejecutable", "*.exe"), ("all files", "*.*")))
apps.append(filename)
print(filename)
for app in apps:
label = tk.Label(frame, text=app, bg="gray")
label.pack()
def run_apps():
for app in apps:
os.startfile(app)
def quit():
root.quit()
# Menu Bar
menu_bar = tk.Menu(root)
file_bar = tk.Menu(menu_bar, tearoff=0)
file_bar.add_command(label='Add', command=add_app)
file_bar.add_command(label='Close Soka', command=root.quit)
menu_bar.add_cascade(label='File', menu=file_bar)
root.config(menu=menu_bar)
run_bar = tk.Menu(menu_bar, tearoff=0)
run_bar.add_command(label='Run', command=run_apps)
menu_bar.add_cascade(label='Run', menu=run_bar)
root.config(menu=menu_bar)
# Canvas and Frame
canvas = tk.Canvas(root, height=500, width=500, bg="#263D42")
canvas.pack()
frame = tk.Frame(root, bg="white")
frame.place(relwidth=0.8, relheight=0.8, relx=0.1, rely=0.1)
# Buttons
openFile = tk.Button(root, text="Abrir archivo", padx=10,
pady=5, fg="white", bg="#263D42", command=add_app)
openFile.pack()
runApps = tk.Button(root, text="Ejecutar Aplicaciones", padx=10,
pady=5, fg="white", bg="#263D42", command=run_apps)
runApps.pack()
quit_soka= tk.Button(root, text="Cerrar Soka", padx=10,pady=5, fg="white", bg="#263D42", command=quit)
quit_soka.pack()
for app in apps:
label = tk.Label(frame, text=app)
label.pack()
# Running
root.mainloop()
```
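`os.startfile`, used by `run_apps` above, exists only on Windows. If the launcher were ever run on another platform, the same step could be written portably with `subprocess`; the sketch below is a hypothetical variant for illustration, not part of Soka itself.
```python
import os
import subprocess
import sys

def run_apps_portable(paths):
    """Launch every selected program without blocking the GUI."""
    for path in paths:
        if sys.platform.startswith("win"):
            os.startfile(path)        # same behaviour as the Windows-only call above
        else:
            subprocess.Popen([path])  # run the executable directly on macOS/Linux
```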
|
{
"source": "jernkuan/thingsboard-python-rest-client",
"score": 2
}
|
#### File: api/api_ce/dashboard_controller_api.py
```python
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from tb_rest_client.api_client import ApiClient
class DashboardControllerApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def add_dashboard_customers_using_post(self, body, dashboard_id, **kwargs): # noqa: E501
"""addDashboardCustomers # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_dashboard_customers_using_post(body, dashboard_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param list[str] body: strCustomerIds (required)
:param str dashboard_id: dashboardId (required)
:return: Dashboard
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.add_dashboard_customers_using_post_with_http_info(body, dashboard_id, **kwargs) # noqa: E501
else:
(data) = self.add_dashboard_customers_using_post_with_http_info(body, dashboard_id, **kwargs) # noqa: E501
return data
def add_dashboard_customers_using_post_with_http_info(self, body, dashboard_id, **kwargs): # noqa: E501
"""addDashboardCustomers # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_dashboard_customers_using_post_with_http_info(body, dashboard_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param list[str] body: strCustomerIds (required)
:param str dashboard_id: dashboardId (required)
:return: Dashboard
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'dashboard_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method add_dashboard_customers_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `add_dashboard_customers_using_post`") # noqa: E501
# verify the required parameter 'dashboard_id' is set
if ('dashboard_id' not in params or
params['dashboard_id'] is None):
raise ValueError("Missing the required parameter `dashboard_id` when calling `add_dashboard_customers_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
if 'dashboard_id' in params:
path_params['dashboardId'] = params['dashboard_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/dashboard/{dashboardId}/customers/add', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Dashboard', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
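    # Usage sketch (added comment, not part of the generated code): with an
    # already authenticated ApiClient -- e.g. the one held by tb_rest_client's
    # RestClient after login -- the pair of methods above could be called as
    #
    #   api = DashboardControllerApi(api_client)
    #   dashboard = api.add_dashboard_customers_using_post(
    #       body=["<customer-uuid>"], dashboard_id="<dashboard-uuid>")
    #
    # Both identifiers are plain UUID strings; the synchronous call returns a
    # Dashboard model, while async_req=True returns a thread whose .get()
    # yields the same object, as the docstrings note.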
def assign_dashboard_to_customer_using_post(self, customer_id, dashboard_id, **kwargs): # noqa: E501
"""assignDashboardToCustomer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.assign_dashboard_to_customer_using_post(customer_id, dashboard_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str customer_id: customerId (required)
:param str dashboard_id: dashboardId (required)
:return: Dashboard
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.assign_dashboard_to_customer_using_post_with_http_info(customer_id, dashboard_id, **kwargs) # noqa: E501
else:
(data) = self.assign_dashboard_to_customer_using_post_with_http_info(customer_id, dashboard_id, **kwargs) # noqa: E501
return data
def assign_dashboard_to_customer_using_post_with_http_info(self, customer_id, dashboard_id, **kwargs): # noqa: E501
"""assignDashboardToCustomer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.assign_dashboard_to_customer_using_post_with_http_info(customer_id, dashboard_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str customer_id: customerId (required)
:param str dashboard_id: dashboardId (required)
:return: Dashboard
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['customer_id', 'dashboard_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method assign_dashboard_to_customer_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'customer_id' is set
if ('customer_id' not in params or
params['customer_id'] is None):
raise ValueError("Missing the required parameter `customer_id` when calling `assign_dashboard_to_customer_using_post`") # noqa: E501
# verify the required parameter 'dashboard_id' is set
if ('dashboard_id' not in params or
params['dashboard_id'] is None):
raise ValueError("Missing the required parameter `dashboard_id` when calling `assign_dashboard_to_customer_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
if 'customer_id' in params:
path_params['customerId'] = params['customer_id'] # noqa: E501
if 'dashboard_id' in params:
path_params['dashboardId'] = params['dashboard_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/customer/{customerId}/dashboard/{dashboardId}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Dashboard', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def assign_dashboard_to_edge_using_post(self, edge_id, dashboard_id, **kwargs): # noqa: E501
"""assignDashboardToEdge # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.assign_dashboard_to_edge_using_post(edge_id, dashboard_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str edge_id: edgeId (required)
:param str dashboard_id: dashboardId (required)
:return: Dashboard
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.assign_dashboard_to_edge_using_post_with_http_info(edge_id, dashboard_id, **kwargs) # noqa: E501
else:
(data) = self.assign_dashboard_to_edge_using_post_with_http_info(edge_id, dashboard_id, **kwargs) # noqa: E501
return data
def assign_dashboard_to_edge_using_post_with_http_info(self, edge_id, dashboard_id, **kwargs): # noqa: E501
"""assignDashboardToEdge # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.assign_dashboard_to_edge_using_post_with_http_info(edge_id, dashboard_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str edge_id: edgeId (required)
:param str dashboard_id: dashboardId (required)
:return: Dashboard
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['edge_id', 'dashboard_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method assign_dashboard_to_edge_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'edge_id' is set
if ('edge_id' not in params or
params['edge_id'] is None):
raise ValueError("Missing the required parameter `edge_id` when calling `assign_dashboard_to_edge_using_post`") # noqa: E501
# verify the required parameter 'dashboard_id' is set
if ('dashboard_id' not in params or
params['dashboard_id'] is None):
raise ValueError("Missing the required parameter `dashboard_id` when calling `assign_dashboard_to_edge_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
if 'edge_id' in params:
path_params['edgeId'] = params['edge_id'] # noqa: E501
if 'dashboard_id' in params:
path_params['dashboardId'] = params['dashboard_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/edge/{edgeId}/dashboard/{dashboardId}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Dashboard', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def assign_dashboard_to_public_customer_using_post(self, dashboard_id, **kwargs): # noqa: E501
"""assignDashboardToPublicCustomer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.assign_dashboard_to_public_customer_using_post(dashboard_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str dashboard_id: dashboardId (required)
:return: Dashboard
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.assign_dashboard_to_public_customer_using_post_with_http_info(dashboard_id, **kwargs) # noqa: E501
else:
(data) = self.assign_dashboard_to_public_customer_using_post_with_http_info(dashboard_id, **kwargs) # noqa: E501
return data
def assign_dashboard_to_public_customer_using_post_with_http_info(self, dashboard_id, **kwargs): # noqa: E501
"""assignDashboardToPublicCustomer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.assign_dashboard_to_public_customer_using_post_with_http_info(dashboard_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str dashboard_id: dashboardId (required)
:return: Dashboard
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['dashboard_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method assign_dashboard_to_public_customer_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'dashboard_id' is set
if ('dashboard_id' not in params or
params['dashboard_id'] is None):
raise ValueError("Missing the required parameter `dashboard_id` when calling `assign_dashboard_to_public_customer_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
if 'dashboard_id' in params:
path_params['dashboardId'] = params['dashboard_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/customer/public/dashboard/{dashboardId}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Dashboard', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_dashboard_using_delete(self, dashboard_id, **kwargs): # noqa: E501
"""deleteDashboard # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_dashboard_using_delete(dashboard_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str dashboard_id: dashboardId (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_dashboard_using_delete_with_http_info(dashboard_id, **kwargs) # noqa: E501
else:
(data) = self.delete_dashboard_using_delete_with_http_info(dashboard_id, **kwargs) # noqa: E501
return data
def delete_dashboard_using_delete_with_http_info(self, dashboard_id, **kwargs): # noqa: E501
"""deleteDashboard # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_dashboard_using_delete_with_http_info(dashboard_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str dashboard_id: dashboardId (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['dashboard_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_dashboard_using_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'dashboard_id' is set
if ('dashboard_id' not in params or
params['dashboard_id'] is None):
raise ValueError("Missing the required parameter `dashboard_id` when calling `delete_dashboard_using_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'dashboard_id' in params:
path_params['dashboardId'] = params['dashboard_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/dashboard/{dashboardId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_customer_dashboards_using_get(self, customer_id, page_size, page, **kwargs): # noqa: E501
"""getCustomerDashboards # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_customer_dashboards_using_get(customer_id, page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str customer_id: customerId (required)
:param str page_size: pageSize (required)
:param str page: page (required)
:param bool mobile: mobile
:param str text_search: textSearch
:param str sort_property: sortProperty
:param str sort_order: sortOrder
:return: PageDataDashboardInfo
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_customer_dashboards_using_get_with_http_info(customer_id, page_size, page, **kwargs) # noqa: E501
else:
(data) = self.get_customer_dashboards_using_get_with_http_info(customer_id, page_size, page, **kwargs) # noqa: E501
return data
def get_customer_dashboards_using_get_with_http_info(self, customer_id, page_size, page, **kwargs): # noqa: E501
"""getCustomerDashboards # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_customer_dashboards_using_get_with_http_info(customer_id, page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str customer_id: customerId (required)
:param str page_size: pageSize (required)
:param str page: page (required)
:param bool mobile: mobile
:param str text_search: textSearch
:param str sort_property: sortProperty
:param str sort_order: sortOrder
:return: PageDataDashboardInfo
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['customer_id', 'page_size', 'page', 'mobile', 'text_search', 'sort_property', 'sort_order'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_customer_dashboards_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'customer_id' is set
if ('customer_id' not in params or
params['customer_id'] is None):
raise ValueError("Missing the required parameter `customer_id` when calling `get_customer_dashboards_using_get`") # noqa: E501
# verify the required parameter 'page_size' is set
if ('page_size' not in params or
params['page_size'] is None):
raise ValueError("Missing the required parameter `page_size` when calling `get_customer_dashboards_using_get`") # noqa: E501
# verify the required parameter 'page' is set
if ('page' not in params or
params['page'] is None):
raise ValueError("Missing the required parameter `page` when calling `get_customer_dashboards_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'customer_id' in params:
path_params['customerId'] = params['customer_id'] # noqa: E501
query_params = []
if 'mobile' in params:
query_params.append(('mobile', params['mobile'])) # noqa: E501
if 'text_search' in params:
query_params.append(('textSearch', params['text_search'])) # noqa: E501
if 'sort_property' in params:
query_params.append(('sortProperty', params['sort_property'])) # noqa: E501
if 'sort_order' in params:
query_params.append(('sortOrder', params['sort_order'])) # noqa: E501
if 'page_size' in params:
query_params.append(('pageSize', params['page_size'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/customer/{customerId}/dashboards{?mobile,textSearch,sortProperty,sortOrder,pageSize,page}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageDataDashboardInfo', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
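    # Pagination sketch (added comment, illustrative values only): pageSize and
    # page are passed as strings by this generated client, so fetching the first
    # page of a customer's dashboards might look like
    #
    #   page_data = api.get_customer_dashboards_using_get(
    #       customer_id="<customer-uuid>", page_size="10", page="0",
    #       sort_property="title", sort_order="ASC")
    #
    # The returned PageDataDashboardInfo is assumed to expose data / has_next
    # fields that can drive a loop over the remaining pages.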
def get_dashboard_by_id_using_get(self, dashboard_id, **kwargs): # noqa: E501
"""getDashboardById # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_dashboard_by_id_using_get(dashboard_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str dashboard_id: dashboardId (required)
:return: Dashboard
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_dashboard_by_id_using_get_with_http_info(dashboard_id, **kwargs) # noqa: E501
else:
(data) = self.get_dashboard_by_id_using_get_with_http_info(dashboard_id, **kwargs) # noqa: E501
return data
def get_dashboard_by_id_using_get_with_http_info(self, dashboard_id, **kwargs): # noqa: E501
"""getDashboardById # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_dashboard_by_id_using_get_with_http_info(dashboard_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str dashboard_id: dashboardId (required)
:return: Dashboard
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['dashboard_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_dashboard_by_id_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'dashboard_id' is set
if ('dashboard_id' not in params or
params['dashboard_id'] is None):
raise ValueError("Missing the required parameter `dashboard_id` when calling `get_dashboard_by_id_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'dashboard_id' in params:
path_params['dashboardId'] = params['dashboard_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/dashboard/{dashboardId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Dashboard', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_dashboard_info_by_id_using_get(self, dashboard_id, **kwargs): # noqa: E501
"""getDashboardInfoById # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_dashboard_info_by_id_using_get(dashboard_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str dashboard_id: dashboardId (required)
:return: DashboardInfo
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_dashboard_info_by_id_using_get_with_http_info(dashboard_id, **kwargs) # noqa: E501
else:
(data) = self.get_dashboard_info_by_id_using_get_with_http_info(dashboard_id, **kwargs) # noqa: E501
return data
def get_dashboard_info_by_id_using_get_with_http_info(self, dashboard_id, **kwargs): # noqa: E501
"""getDashboardInfoById # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_dashboard_info_by_id_using_get_with_http_info(dashboard_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str dashboard_id: dashboardId (required)
:return: DashboardInfo
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['dashboard_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_dashboard_info_by_id_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'dashboard_id' is set
if ('dashboard_id' not in params or
params['dashboard_id'] is None):
raise ValueError("Missing the required parameter `dashboard_id` when calling `get_dashboard_info_by_id_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'dashboard_id' in params:
path_params['dashboardId'] = params['dashboard_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/dashboard/info/{dashboardId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DashboardInfo', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_edge_dashboards_using_get(self, edge_id, page_size, page, **kwargs): # noqa: E501
"""getEdgeDashboards # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_edge_dashboards_using_get(edge_id, page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str edge_id: edgeId (required)
:param str page_size: pageSize (required)
:param str page: page (required)
:param str text_search: textSearch
:param str sort_property: sortProperty
:param str sort_order: sortOrder
:param int start_time: startTime
:param int end_time: endTime
:return: PageDataDashboardInfo
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_edge_dashboards_using_get_with_http_info(edge_id, page_size, page, **kwargs) # noqa: E501
else:
(data) = self.get_edge_dashboards_using_get_with_http_info(edge_id, page_size, page, **kwargs) # noqa: E501
return data
def get_edge_dashboards_using_get_with_http_info(self, edge_id, page_size, page, **kwargs): # noqa: E501
"""getEdgeDashboards # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_edge_dashboards_using_get_with_http_info(edge_id, page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str edge_id: edgeId (required)
:param str page_size: pageSize (required)
:param str page: page (required)
:param str text_search: textSearch
:param str sort_property: sortProperty
:param str sort_order: sortOrder
:param int start_time: startTime
:param int end_time: endTime
:return: PageDataDashboardInfo
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['edge_id', 'page_size', 'page', 'text_search', 'sort_property', 'sort_order', 'start_time', 'end_time'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_edge_dashboards_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'edge_id' is set
if ('edge_id' not in params or
params['edge_id'] is None):
raise ValueError("Missing the required parameter `edge_id` when calling `get_edge_dashboards_using_get`") # noqa: E501
# verify the required parameter 'page_size' is set
if ('page_size' not in params or
params['page_size'] is None):
raise ValueError("Missing the required parameter `page_size` when calling `get_edge_dashboards_using_get`") # noqa: E501
# verify the required parameter 'page' is set
if ('page' not in params or
params['page'] is None):
raise ValueError("Missing the required parameter `page` when calling `get_edge_dashboards_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'edge_id' in params:
path_params['edgeId'] = params['edge_id'] # noqa: E501
query_params = []
if 'text_search' in params:
query_params.append(('textSearch', params['text_search'])) # noqa: E501
if 'sort_property' in params:
query_params.append(('sortProperty', params['sort_property'])) # noqa: E501
if 'sort_order' in params:
query_params.append(('sortOrder', params['sort_order'])) # noqa: E501
if 'start_time' in params:
query_params.append(('startTime', params['start_time'])) # noqa: E501
if 'end_time' in params:
query_params.append(('endTime', params['end_time'])) # noqa: E501
if 'page_size' in params:
query_params.append(('pageSize', params['page_size'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/edge/{edgeId}/dashboards{?textSearch,sortProperty,sortOrder,startTime,endTime,pageSize,page}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageDataDashboardInfo', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_home_dashboard_info_using_get(self, **kwargs): # noqa: E501
"""getHomeDashboardInfo # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_home_dashboard_info_using_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: HomeDashboardInfo
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_home_dashboard_info_using_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_home_dashboard_info_using_get_with_http_info(**kwargs) # noqa: E501
return data
def get_home_dashboard_info_using_get_with_http_info(self, **kwargs): # noqa: E501
"""getHomeDashboardInfo # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_home_dashboard_info_using_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: HomeDashboardInfo
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_home_dashboard_info_using_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/dashboard/home/info', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='HomeDashboardInfo', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_home_dashboard_using_get(self, **kwargs): # noqa: E501
"""getHomeDashboard # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_home_dashboard_using_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: HomeDashboard
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_home_dashboard_using_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_home_dashboard_using_get_with_http_info(**kwargs) # noqa: E501
return data
def get_home_dashboard_using_get_with_http_info(self, **kwargs): # noqa: E501
"""getHomeDashboard # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_home_dashboard_using_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: HomeDashboard
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_home_dashboard_using_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/dashboard/home', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='HomeDashboard', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_max_datapoints_limit_using_get(self, **kwargs): # noqa: E501
"""getMaxDatapointsLimit # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_max_datapoints_limit_using_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: int
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_max_datapoints_limit_using_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_max_datapoints_limit_using_get_with_http_info(**kwargs) # noqa: E501
return data
def get_max_datapoints_limit_using_get_with_http_info(self, **kwargs): # noqa: E501
"""getMaxDatapointsLimit # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_max_datapoints_limit_using_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: int
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_max_datapoints_limit_using_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/dashboard/maxDatapointsLimit', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='int', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_server_time_using_get(self, **kwargs): # noqa: E501
"""getServerTime # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_server_time_using_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: int
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_server_time_using_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_server_time_using_get_with_http_info(**kwargs) # noqa: E501
return data
def get_server_time_using_get_with_http_info(self, **kwargs): # noqa: E501
"""getServerTime # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_server_time_using_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: int
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_server_time_using_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/dashboard/serverTime', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='int', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_tenant_dashboards_using_get(self, page_size, page, **kwargs): # noqa: E501
"""getTenantDashboards # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tenant_dashboards_using_get(page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str page_size: pageSize (required)
:param str page: page (required)
:param bool mobile: mobile
:param str text_search: textSearch
:param str sort_property: sortProperty
:param str sort_order: sortOrder
:return: PageDataDashboardInfo
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_tenant_dashboards_using_get_with_http_info(page_size, page, **kwargs) # noqa: E501
else:
(data) = self.get_tenant_dashboards_using_get_with_http_info(page_size, page, **kwargs) # noqa: E501
return data
def get_tenant_dashboards_using_get_with_http_info(self, page_size, page, **kwargs): # noqa: E501
"""getTenantDashboards # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tenant_dashboards_using_get_with_http_info(page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str page_size: pageSize (required)
:param str page: page (required)
:param bool mobile: mobile
:param str text_search: textSearch
:param str sort_property: sortProperty
:param str sort_order: sortOrder
:return: PageDataDashboardInfo
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page_size', 'page', 'mobile', 'text_search', 'sort_property', 'sort_order'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_tenant_dashboards_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'page_size' is set
if ('page_size' not in params or
params['page_size'] is None):
raise ValueError("Missing the required parameter `page_size` when calling `get_tenant_dashboards_using_get`") # noqa: E501
# verify the required parameter 'page' is set
if ('page' not in params or
params['page'] is None):
raise ValueError("Missing the required parameter `page` when calling `get_tenant_dashboards_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'mobile' in params:
query_params.append(('mobile', params['mobile'])) # noqa: E501
if 'text_search' in params:
query_params.append(('textSearch', params['text_search'])) # noqa: E501
if 'sort_property' in params:
query_params.append(('sortProperty', params['sort_property'])) # noqa: E501
if 'sort_order' in params:
query_params.append(('sortOrder', params['sort_order'])) # noqa: E501
if 'page_size' in params:
query_params.append(('pageSize', params['page_size'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/tenant/dashboards{?mobile,textSearch,sortProperty,sortOrder,pageSize,page}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageDataDashboardInfo', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_tenant_dashboards_using_get1(self, tenant_id, page_size, page, **kwargs): # noqa: E501
"""getTenantDashboards # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tenant_dashboards_using_get1(tenant_id, page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str tenant_id: tenantId (required)
:param str page_size: pageSize (required)
:param str page: page (required)
:param str text_search: textSearch
:param str sort_property: sortProperty
:param str sort_order: sortOrder
:return: PageDataDashboardInfo
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_tenant_dashboards_using_get1_with_http_info(tenant_id, page_size, page, **kwargs) # noqa: E501
else:
(data) = self.get_tenant_dashboards_using_get1_with_http_info(tenant_id, page_size, page, **kwargs) # noqa: E501
return data
def get_tenant_dashboards_using_get1_with_http_info(self, tenant_id, page_size, page, **kwargs): # noqa: E501
"""getTenantDashboards # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tenant_dashboards_using_get1_with_http_info(tenant_id, page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str tenant_id: tenantId (required)
:param str page_size: pageSize (required)
:param str page: page (required)
:param str text_search: textSearch
:param str sort_property: sortProperty
:param str sort_order: sortOrder
:return: PageDataDashboardInfo
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['tenant_id', 'page_size', 'page', 'text_search', 'sort_property', 'sort_order'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_tenant_dashboards_using_get1" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'tenant_id' is set
if ('tenant_id' not in params or
params['tenant_id'] is None):
raise ValueError("Missing the required parameter `tenant_id` when calling `get_tenant_dashboards_using_get1`") # noqa: E501
# verify the required parameter 'page_size' is set
if ('page_size' not in params or
params['page_size'] is None):
raise ValueError("Missing the required parameter `page_size` when calling `get_tenant_dashboards_using_get1`") # noqa: E501
# verify the required parameter 'page' is set
if ('page' not in params or
params['page'] is None):
raise ValueError("Missing the required parameter `page` when calling `get_tenant_dashboards_using_get1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'tenant_id' in params:
path_params['tenantId'] = params['tenant_id'] # noqa: E501
query_params = []
if 'text_search' in params:
query_params.append(('textSearch', params['text_search'])) # noqa: E501
if 'sort_property' in params:
query_params.append(('sortProperty', params['sort_property'])) # noqa: E501
if 'sort_order' in params:
query_params.append(('sortOrder', params['sort_order'])) # noqa: E501
if 'page_size' in params:
query_params.append(('pageSize', params['page_size'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/tenant/{tenantId}/dashboards{?textSearch,sortProperty,sortOrder,pageSize,page}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageDataDashboardInfo', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_tenant_home_dashboard_info_using_get(self, **kwargs): # noqa: E501
"""getTenantHomeDashboardInfo # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tenant_home_dashboard_info_using_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: HomeDashboardInfo
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_tenant_home_dashboard_info_using_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_tenant_home_dashboard_info_using_get_with_http_info(**kwargs) # noqa: E501
return data
def get_tenant_home_dashboard_info_using_get_with_http_info(self, **kwargs): # noqa: E501
"""getTenantHomeDashboardInfo # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tenant_home_dashboard_info_using_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: HomeDashboardInfo
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_tenant_home_dashboard_info_using_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/tenant/dashboard/home/info', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='HomeDashboardInfo', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def remove_dashboard_customers_using_post(self, body, dashboard_id, **kwargs): # noqa: E501
"""removeDashboardCustomers # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.remove_dashboard_customers_using_post(body, dashboard_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param list[str] body: strCustomerIds (required)
:param str dashboard_id: dashboardId (required)
:return: Dashboard
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.remove_dashboard_customers_using_post_with_http_info(body, dashboard_id, **kwargs) # noqa: E501
else:
(data) = self.remove_dashboard_customers_using_post_with_http_info(body, dashboard_id, **kwargs) # noqa: E501
return data
def remove_dashboard_customers_using_post_with_http_info(self, body, dashboard_id, **kwargs): # noqa: E501
"""removeDashboardCustomers # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.remove_dashboard_customers_using_post_with_http_info(body, dashboard_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param list[str] body: strCustomerIds (required)
:param str dashboard_id: dashboardId (required)
:return: Dashboard
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'dashboard_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method remove_dashboard_customers_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `remove_dashboard_customers_using_post`") # noqa: E501
# verify the required parameter 'dashboard_id' is set
if ('dashboard_id' not in params or
params['dashboard_id'] is None):
raise ValueError("Missing the required parameter `dashboard_id` when calling `remove_dashboard_customers_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
if 'dashboard_id' in params:
path_params['dashboardId'] = params['dashboard_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/dashboard/{dashboardId}/customers/remove', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Dashboard', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def save_dashboard_using_post(self, body, **kwargs): # noqa: E501
"""saveDashboard # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.save_dashboard_using_post(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Dashboard body: dashboard (required)
:return: Dashboard
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.save_dashboard_using_post_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.save_dashboard_using_post_with_http_info(body, **kwargs) # noqa: E501
return data
def save_dashboard_using_post_with_http_info(self, body, **kwargs): # noqa: E501
"""saveDashboard # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.save_dashboard_using_post_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Dashboard body: dashboard (required)
:return: Dashboard
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method save_dashboard_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `save_dashboard_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/dashboard', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Dashboard', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def set_tenant_home_dashboard_info_using_post(self, body, **kwargs): # noqa: E501
"""setTenantHomeDashboardInfo # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.set_tenant_home_dashboard_info_using_post(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param HomeDashboardInfo body: homeDashboardInfo (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.set_tenant_home_dashboard_info_using_post_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.set_tenant_home_dashboard_info_using_post_with_http_info(body, **kwargs) # noqa: E501
return data
def set_tenant_home_dashboard_info_using_post_with_http_info(self, body, **kwargs): # noqa: E501
"""setTenantHomeDashboardInfo # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.set_tenant_home_dashboard_info_using_post_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param HomeDashboardInfo body: homeDashboardInfo (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method set_tenant_home_dashboard_info_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `set_tenant_home_dashboard_info_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/tenant/dashboard/home/info', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def unassign_dashboard_from_customer_using_delete(self, customer_id, dashboard_id, **kwargs): # noqa: E501
"""unassignDashboardFromCustomer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.unassign_dashboard_from_customer_using_delete(customer_id, dashboard_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str customer_id: customerId (required)
:param str dashboard_id: dashboardId (required)
:return: Dashboard
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.unassign_dashboard_from_customer_using_delete_with_http_info(customer_id, dashboard_id, **kwargs) # noqa: E501
else:
(data) = self.unassign_dashboard_from_customer_using_delete_with_http_info(customer_id, dashboard_id, **kwargs) # noqa: E501
return data
def unassign_dashboard_from_customer_using_delete_with_http_info(self, customer_id, dashboard_id, **kwargs): # noqa: E501
"""unassignDashboardFromCustomer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.unassign_dashboard_from_customer_using_delete_with_http_info(customer_id, dashboard_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str customer_id: customerId (required)
:param str dashboard_id: dashboardId (required)
:return: Dashboard
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['customer_id', 'dashboard_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method unassign_dashboard_from_customer_using_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'customer_id' is set
if ('customer_id' not in params or
params['customer_id'] is None):
raise ValueError("Missing the required parameter `customer_id` when calling `unassign_dashboard_from_customer_using_delete`") # noqa: E501
# verify the required parameter 'dashboard_id' is set
if ('dashboard_id' not in params or
params['dashboard_id'] is None):
raise ValueError("Missing the required parameter `dashboard_id` when calling `unassign_dashboard_from_customer_using_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'customer_id' in params:
path_params['customerId'] = params['customer_id'] # noqa: E501
if 'dashboard_id' in params:
path_params['dashboardId'] = params['dashboard_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/customer/{customerId}/dashboard/{dashboardId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Dashboard', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def unassign_dashboard_from_edge_using_delete(self, edge_id, dashboard_id, **kwargs): # noqa: E501
"""unassignDashboardFromEdge # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.unassign_dashboard_from_edge_using_delete(edge_id, dashboard_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str edge_id: edgeId (required)
:param str dashboard_id: dashboardId (required)
:return: Dashboard
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.unassign_dashboard_from_edge_using_delete_with_http_info(edge_id, dashboard_id, **kwargs) # noqa: E501
else:
(data) = self.unassign_dashboard_from_edge_using_delete_with_http_info(edge_id, dashboard_id, **kwargs) # noqa: E501
return data
def unassign_dashboard_from_edge_using_delete_with_http_info(self, edge_id, dashboard_id, **kwargs): # noqa: E501
"""unassignDashboardFromEdge # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.unassign_dashboard_from_edge_using_delete_with_http_info(edge_id, dashboard_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str edge_id: edgeId (required)
:param str dashboard_id: dashboardId (required)
:return: Dashboard
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['edge_id', 'dashboard_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method unassign_dashboard_from_edge_using_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'edge_id' is set
if ('edge_id' not in params or
params['edge_id'] is None):
raise ValueError("Missing the required parameter `edge_id` when calling `unassign_dashboard_from_edge_using_delete`") # noqa: E501
# verify the required parameter 'dashboard_id' is set
if ('dashboard_id' not in params or
params['dashboard_id'] is None):
raise ValueError("Missing the required parameter `dashboard_id` when calling `unassign_dashboard_from_edge_using_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'edge_id' in params:
path_params['edgeId'] = params['edge_id'] # noqa: E501
if 'dashboard_id' in params:
path_params['dashboardId'] = params['dashboard_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/edge/{edgeId}/dashboard/{dashboardId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Dashboard', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def unassign_dashboard_from_public_customer_using_delete(self, dashboard_id, **kwargs): # noqa: E501
"""unassignDashboardFromPublicCustomer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.unassign_dashboard_from_public_customer_using_delete(dashboard_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str dashboard_id: dashboardId (required)
:return: Dashboard
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.unassign_dashboard_from_public_customer_using_delete_with_http_info(dashboard_id, **kwargs) # noqa: E501
else:
(data) = self.unassign_dashboard_from_public_customer_using_delete_with_http_info(dashboard_id, **kwargs) # noqa: E501
return data
def unassign_dashboard_from_public_customer_using_delete_with_http_info(self, dashboard_id, **kwargs): # noqa: E501
"""unassignDashboardFromPublicCustomer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.unassign_dashboard_from_public_customer_using_delete_with_http_info(dashboard_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str dashboard_id: dashboardId (required)
:return: Dashboard
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['dashboard_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method unassign_dashboard_from_public_customer_using_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'dashboard_id' is set
if ('dashboard_id' not in params or
params['dashboard_id'] is None):
raise ValueError("Missing the required parameter `dashboard_id` when calling `unassign_dashboard_from_public_customer_using_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'dashboard_id' in params:
path_params['dashboardId'] = params['dashboard_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/customer/public/dashboard/{dashboardId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Dashboard', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_dashboard_customers_using_post(self, dashboard_id, **kwargs): # noqa: E501
"""updateDashboardCustomers # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_dashboard_customers_using_post(dashboard_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str dashboard_id: dashboardId (required)
:param list[str] body: strCustomerIds
:return: Dashboard
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_dashboard_customers_using_post_with_http_info(dashboard_id, **kwargs) # noqa: E501
else:
(data) = self.update_dashboard_customers_using_post_with_http_info(dashboard_id, **kwargs) # noqa: E501
return data
def update_dashboard_customers_using_post_with_http_info(self, dashboard_id, **kwargs): # noqa: E501
"""updateDashboardCustomers # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_dashboard_customers_using_post_with_http_info(dashboard_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str dashboard_id: dashboardId (required)
:param list[str] body: strCustomerIds
:return: Dashboard
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['dashboard_id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_dashboard_customers_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'dashboard_id' is set
if ('dashboard_id' not in params or
params['dashboard_id'] is None):
raise ValueError("Missing the required parameter `dashboard_id` when calling `update_dashboard_customers_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
if 'dashboard_id' in params:
path_params['dashboardId'] = params['dashboard_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/dashboard/{dashboardId}/customers', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Dashboard', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
```
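The dashboard controller methods above all share the same calling convention: a plain call runs synchronously, while `async_req=True` returns a worker thread whose `.get()` yields the response. A minimal usage sketch follows; the class name `DashboardControllerApi`, the import paths, and the `Dashboard` model constructor are assumptions (the class definition sits above this excerpt), and the host, token, and title values are placeholders.
```python
# Minimal, hedged usage sketch. Assumptions (not confirmed by the excerpt above):
# the surrounding class is DashboardControllerApi, the import paths mirror the
# file layout, and the Dashboard model accepts a `title` keyword argument.
from tb_rest_client.api_client import ApiClient
from tb_rest_client.api.api_ce.dashboard_controller_api import DashboardControllerApi
from tb_rest_client.models.models_ce import Dashboard

api_client = ApiClient()
api_client.configuration.host = "https://thingsboard.example.com"      # placeholder host
# Every method above lists 'X-Authorization' in auth_settings; attaching the
# JWT as a default header is one common way to satisfy it.
api_client.set_default_header("X-Authorization", "Bearer <JWT_TOKEN>")  # placeholder token

dashboard_api = DashboardControllerApi(api_client)

# Synchronous call: create or update a dashboard.
saved = dashboard_api.save_dashboard_using_post(Dashboard(title="Hello IoT"))

# Asynchronous call: the method returns a thread, .get() blocks for the result.
thread = dashboard_api.get_tenant_home_dashboard_info_using_get(async_req=True)
home_info = thread.get()
```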
#### File: api/api_ce/o_auth_2_config_template_controller_api.py
```python
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from tb_rest_client.api_client import ApiClient
class OAuth2ConfigTemplateControllerApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def delete_client_registration_template_using_delete(self, client_registration_template_id, **kwargs): # noqa: E501
"""deleteClientRegistrationTemplate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_client_registration_template_using_delete(client_registration_template_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str client_registration_template_id: clientRegistrationTemplateId (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_client_registration_template_using_delete_with_http_info(client_registration_template_id, **kwargs) # noqa: E501
else:
(data) = self.delete_client_registration_template_using_delete_with_http_info(client_registration_template_id, **kwargs) # noqa: E501
return data
def delete_client_registration_template_using_delete_with_http_info(self, client_registration_template_id, **kwargs): # noqa: E501
"""deleteClientRegistrationTemplate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_client_registration_template_using_delete_with_http_info(client_registration_template_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str client_registration_template_id: clientRegistrationTemplateId (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['client_registration_template_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_client_registration_template_using_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'client_registration_template_id' is set
if ('client_registration_template_id' not in params or
params['client_registration_template_id'] is None):
raise ValueError("Missing the required parameter `client_registration_template_id` when calling `delete_client_registration_template_using_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'client_registration_template_id' in params:
path_params['clientRegistrationTemplateId'] = params['client_registration_template_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/oauth2/config/template/{clientRegistrationTemplateId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_client_registration_templates_using_get(self, **kwargs): # noqa: E501
"""getClientRegistrationTemplates # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_client_registration_templates_using_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[OAuth2ClientRegistrationTemplate]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_client_registration_templates_using_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_client_registration_templates_using_get_with_http_info(**kwargs) # noqa: E501
return data
def get_client_registration_templates_using_get_with_http_info(self, **kwargs): # noqa: E501
"""getClientRegistrationTemplates # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_client_registration_templates_using_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[OAuth2ClientRegistrationTemplate]
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_client_registration_templates_using_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/oauth2/config/template', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[OAuth2ClientRegistrationTemplate]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def save_client_registration_template_using_post(self, body, **kwargs): # noqa: E501
"""saveClientRegistrationTemplate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.save_client_registration_template_using_post(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param OAuth2ClientRegistrationTemplate body: clientRegistrationTemplate (required)
:return: OAuth2ClientRegistrationTemplate
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.save_client_registration_template_using_post_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.save_client_registration_template_using_post_with_http_info(body, **kwargs) # noqa: E501
return data
def save_client_registration_template_using_post_with_http_info(self, body, **kwargs): # noqa: E501
"""saveClientRegistrationTemplate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.save_client_registration_template_using_post_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param OAuth2ClientRegistrationTemplate body: clientRegistrationTemplate (required)
:return: OAuth2ClientRegistrationTemplate
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method save_client_registration_template_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `save_client_registration_template_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/oauth2/config/template', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='OAuth2ClientRegistrationTemplate', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
```
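`OAuth2ConfigTemplateControllerApi` only exposes three operations (list, save, delete), so a short sketch covers it. The ApiClient setup and import path are the same assumptions as in the previous example, the template id and token are placeholders, and in ThingsBoard these endpoints typically require a system administrator JWT.
```python
# Hedged sketch for OAuth2ConfigTemplateControllerApi; the import path and JWT
# header handling are assumptions, <TEMPLATE_ID> and the token are placeholders.
from tb_rest_client.api_client import ApiClient
from tb_rest_client.api.api_ce.o_auth_2_config_template_controller_api import OAuth2ConfigTemplateControllerApi

api_client = ApiClient()
api_client.set_default_header("X-Authorization", "Bearer <JWT_TOKEN>")

oauth2_template_api = OAuth2ConfigTemplateControllerApi(api_client)

# List every registered client registration template.
for template in oauth2_template_api.get_client_registration_templates_using_get():
    print(template)

# Remove a template by id.
oauth2_template_api.delete_client_registration_template_using_delete("<TEMPLATE_ID>")
```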
#### File: api/api_ce/rpc_v_2_controller_api.py
```python
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from tb_rest_client.api_client import ApiClient
class RpcV2ControllerApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def delete_resource_using_delete(self, rpc_id, **kwargs): # noqa: E501
"""deleteResource # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_resource_using_delete(rpc_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str rpc_id: rpcId (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_resource_using_delete_with_http_info(rpc_id, **kwargs) # noqa: E501
else:
(data) = self.delete_resource_using_delete_with_http_info(rpc_id, **kwargs) # noqa: E501
return data
def delete_resource_using_delete_with_http_info(self, rpc_id, **kwargs): # noqa: E501
"""deleteResource # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_resource_using_delete_with_http_info(rpc_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str rpc_id: rpcId (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['rpc_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_resource_using_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'rpc_id' is set
if ('rpc_id' not in params or
params['rpc_id'] is None):
raise ValueError("Missing the required parameter `rpc_id` when calling `delete_resource_using_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'rpc_id' in params:
path_params['rpcId'] = params['rpc_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/rpc/persistent/{rpcId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_persisted_rpc_by_device_using_get(self, device_id, page_size, page, rpc_status, **kwargs): # noqa: E501
"""getPersistedRpcByDevice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_persisted_rpc_by_device_using_get(device_id, page_size, page, rpc_status, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str device_id: deviceId (required)
:param int page_size: pageSize (required)
:param int page: page (required)
:param str rpc_status: rpcStatus (required)
:param str text_search: textSearch
:param str sort_property: sortProperty
:param str sort_order: sortOrder
:return: PageDataRpc
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_persisted_rpc_by_device_using_get_with_http_info(device_id, page_size, page, rpc_status, **kwargs) # noqa: E501
else:
(data) = self.get_persisted_rpc_by_device_using_get_with_http_info(device_id, page_size, page, rpc_status, **kwargs) # noqa: E501
return data
def get_persisted_rpc_by_device_using_get_with_http_info(self, device_id, page_size, page, rpc_status, **kwargs): # noqa: E501
"""getPersistedRpcByDevice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_persisted_rpc_by_device_using_get_with_http_info(device_id, page_size, page, rpc_status, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str device_id: deviceId (required)
:param int page_size: pageSize (required)
:param int page: page (required)
:param str rpc_status: rpcStatus (required)
:param str text_search: textSearch
:param str sort_property: sortProperty
:param str sort_order: sortOrder
:return: PageDataRpc
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['device_id', 'page_size', 'page', 'rpc_status', 'text_search', 'sort_property', 'sort_order'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_persisted_rpc_by_device_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'device_id' is set
if ('device_id' not in params or
params['device_id'] is None):
raise ValueError("Missing the required parameter `device_id` when calling `get_persisted_rpc_by_device_using_get`") # noqa: E501
# verify the required parameter 'page_size' is set
if ('page_size' not in params or
params['page_size'] is None):
raise ValueError("Missing the required parameter `page_size` when calling `get_persisted_rpc_by_device_using_get`") # noqa: E501
# verify the required parameter 'page' is set
if ('page' not in params or
params['page'] is None):
raise ValueError("Missing the required parameter `page` when calling `get_persisted_rpc_by_device_using_get`") # noqa: E501
# verify the required parameter 'rpc_status' is set
if ('rpc_status' not in params or
params['rpc_status'] is None):
raise ValueError("Missing the required parameter `rpc_status` when calling `get_persisted_rpc_by_device_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'device_id' in params:
path_params['deviceId'] = params['device_id'] # noqa: E501
query_params = []
if 'page_size' in params:
query_params.append(('pageSize', params['page_size'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'rpc_status' in params:
query_params.append(('rpcStatus', params['rpc_status'])) # noqa: E501
if 'text_search' in params:
query_params.append(('textSearch', params['text_search'])) # noqa: E501
if 'sort_property' in params:
query_params.append(('sortProperty', params['sort_property'])) # noqa: E501
if 'sort_order' in params:
query_params.append(('sortOrder', params['sort_order'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/rpc/persistent/device/{deviceId}{?pageSize,page,rpcStatus,textSearch,sortProperty,sortOrder}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageDataRpc', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_persisted_rpc_using_get(self, rpc_id, **kwargs): # noqa: E501
"""getPersistedRpc # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_persisted_rpc_using_get(rpc_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str rpc_id: rpcId (required)
:return: Rpc
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_persisted_rpc_using_get_with_http_info(rpc_id, **kwargs) # noqa: E501
else:
(data) = self.get_persisted_rpc_using_get_with_http_info(rpc_id, **kwargs) # noqa: E501
return data
def get_persisted_rpc_using_get_with_http_info(self, rpc_id, **kwargs): # noqa: E501
"""getPersistedRpc # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_persisted_rpc_using_get_with_http_info(rpc_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str rpc_id: rpcId (required)
:return: Rpc
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['rpc_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_persisted_rpc_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'rpc_id' is set
if ('rpc_id' not in params or
params['rpc_id'] is None):
raise ValueError("Missing the required parameter `rpc_id` when calling `get_persisted_rpc_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'rpc_id' in params:
path_params['rpcId'] = params['rpc_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/rpc/persistent/{rpcId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Rpc', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def handle_one_way_device_rpc_request_using_post1(self, body, device_id, **kwargs): # noqa: E501
"""handleOneWayDeviceRPCRequest # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.handle_one_way_device_rpc_request_using_post1(body, device_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str body: requestBody (required)
:param str device_id: deviceId (required)
:return: DeferredResultResponseEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.handle_one_way_device_rpc_request_using_post1_with_http_info(body, device_id, **kwargs) # noqa: E501
else:
(data) = self.handle_one_way_device_rpc_request_using_post1_with_http_info(body, device_id, **kwargs) # noqa: E501
return data
def handle_one_way_device_rpc_request_using_post1_with_http_info(self, body, device_id, **kwargs): # noqa: E501
"""handleOneWayDeviceRPCRequest # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.handle_one_way_device_rpc_request_using_post1_with_http_info(body, device_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str body: requestBody (required)
:param str device_id: deviceId (required)
:return: DeferredResultResponseEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'device_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method handle_one_way_device_rpc_request_using_post1" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `handle_one_way_device_rpc_request_using_post1`") # noqa: E501
# verify the required parameter 'device_id' is set
if ('device_id' not in params or
params['device_id'] is None):
raise ValueError("Missing the required parameter `device_id` when calling `handle_one_way_device_rpc_request_using_post1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'device_id' in params:
path_params['deviceId'] = params['device_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/rpc/oneway/{deviceId}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DeferredResultResponseEntity', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def handle_two_way_device_rpc_request_using_post1(self, body, device_id, **kwargs): # noqa: E501
"""handleTwoWayDeviceRPCRequest # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.handle_two_way_device_rpc_request_using_post1(body, device_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str body: requestBody (required)
:param str device_id: deviceId (required)
:return: DeferredResultResponseEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.handle_two_way_device_rpc_request_using_post1_with_http_info(body, device_id, **kwargs) # noqa: E501
else:
(data) = self.handle_two_way_device_rpc_request_using_post1_with_http_info(body, device_id, **kwargs) # noqa: E501
return data
def handle_two_way_device_rpc_request_using_post1_with_http_info(self, body, device_id, **kwargs): # noqa: E501
"""handleTwoWayDeviceRPCRequest # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.handle_two_way_device_rpc_request_using_post1_with_http_info(body, device_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str body: requestBody (required)
:param str device_id: deviceId (required)
:return: DeferredResultResponseEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'device_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method handle_two_way_device_rpc_request_using_post1" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `handle_two_way_device_rpc_request_using_post1`") # noqa: E501
# verify the required parameter 'device_id' is set
if ('device_id' not in params or
params['device_id'] is None):
raise ValueError("Missing the required parameter `device_id` when calling `handle_two_way_device_rpc_request_using_post1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'device_id' in params:
path_params['deviceId'] = params['device_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/rpc/twoway/{deviceId}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DeferredResultResponseEntity', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
```
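For `RpcV2ControllerApi`, the docstrings above declare the request body as a plain `str`, so the JSON payload has to be serialized by the caller. The sketch below follows ThingsBoard's usual `{"method": ..., "params": ...}` RPC layout; the RPC method name, device id, status filter, and token are placeholders, and the import path is assumed from the file location.
```python
# Hedged sketch for RpcV2ControllerApi; the body is passed as a JSON string
# because the generated docstrings declare `str body`. Ids, token and the RPC
# method name are placeholders.
import json

from tb_rest_client.api_client import ApiClient
from tb_rest_client.api.api_ce.rpc_v_2_controller_api import RpcV2ControllerApi

api_client = ApiClient()
api_client.set_default_header("X-Authorization", "Bearer <JWT_TOKEN>")

rpc_api = RpcV2ControllerApi(api_client)

device_id = "<DEVICE_ID>"
payload = json.dumps({"method": "setLedState", "params": {"on": True}})

# One-way RPC: fire and forget towards the device.
rpc_api.handle_one_way_device_rpc_request_using_post1(payload, device_id)

# Two-way RPC: the deferred result wraps the device's reply.
reply = rpc_api.handle_two_way_device_rpc_request_using_post1(payload, device_id)

# Persistent RPC bookkeeping: page through stored requests for the device.
page = rpc_api.get_persisted_rpc_by_device_using_get(device_id, 10, 0, "QUEUED")
```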
#### File: api/api_ce/rule_chain_controller_api.py
```python
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from tb_rest_client.api_client import ApiClient
class RuleChainControllerApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def assign_rule_chain_to_edge_using_post(self, edge_id, rule_chain_id, **kwargs): # noqa: E501
"""assignRuleChainToEdge # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.assign_rule_chain_to_edge_using_post(edge_id, rule_chain_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str edge_id: edgeId (required)
:param str rule_chain_id: ruleChainId (required)
:return: RuleChain
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.assign_rule_chain_to_edge_using_post_with_http_info(edge_id, rule_chain_id, **kwargs) # noqa: E501
else:
(data) = self.assign_rule_chain_to_edge_using_post_with_http_info(edge_id, rule_chain_id, **kwargs) # noqa: E501
return data
def assign_rule_chain_to_edge_using_post_with_http_info(self, edge_id, rule_chain_id, **kwargs): # noqa: E501
"""assignRuleChainToEdge # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.assign_rule_chain_to_edge_using_post_with_http_info(edge_id, rule_chain_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str edge_id: edgeId (required)
:param str rule_chain_id: ruleChainId (required)
:return: RuleChain
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['edge_id', 'rule_chain_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method assign_rule_chain_to_edge_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'edge_id' is set
if ('edge_id' not in params or
params['edge_id'] is None):
raise ValueError("Missing the required parameter `edge_id` when calling `assign_rule_chain_to_edge_using_post`") # noqa: E501
# verify the required parameter 'rule_chain_id' is set
if ('rule_chain_id' not in params or
params['rule_chain_id'] is None):
raise ValueError("Missing the required parameter `rule_chain_id` when calling `assign_rule_chain_to_edge_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
if 'edge_id' in params:
path_params['edgeId'] = params['edge_id'] # noqa: E501
if 'rule_chain_id' in params:
path_params['ruleChainId'] = params['rule_chain_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/edge/{edgeId}/ruleChain/{ruleChainId}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RuleChain', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_rule_chain_using_delete(self, rule_chain_id, **kwargs): # noqa: E501
"""deleteRuleChain # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_rule_chain_using_delete(rule_chain_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str rule_chain_id: ruleChainId (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_rule_chain_using_delete_with_http_info(rule_chain_id, **kwargs) # noqa: E501
else:
(data) = self.delete_rule_chain_using_delete_with_http_info(rule_chain_id, **kwargs) # noqa: E501
return data
def delete_rule_chain_using_delete_with_http_info(self, rule_chain_id, **kwargs): # noqa: E501
"""deleteRuleChain # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_rule_chain_using_delete_with_http_info(rule_chain_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str rule_chain_id: ruleChainId (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['rule_chain_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_rule_chain_using_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'rule_chain_id' is set
if ('rule_chain_id' not in params or
params['rule_chain_id'] is None):
raise ValueError("Missing the required parameter `rule_chain_id` when calling `delete_rule_chain_using_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'rule_chain_id' in params:
path_params['ruleChainId'] = params['rule_chain_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/ruleChain/{ruleChainId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
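# Usage sketch (illustrative comment, not generated code): a rule chain can be fetched by id
# and then removed; on success the delete call returns None, and HTTP errors are surfaced as
# exceptions by the underlying api_client.
#
#   rc = api.get_rule_chain_by_id_using_get(rule_chain_id="<rule-chain-uuid>")
#   api.delete_rule_chain_using_delete(rule_chain_id=rc.id.id)  # assumes RuleChain.id.id holds the uuid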
def export_rule_chains_using_get(self, limit, **kwargs): # noqa: E501
"""exportRuleChains # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.export_rule_chains_using_get(limit, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str limit: limit (required)
:return: RuleChainData
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.export_rule_chains_using_get_with_http_info(limit, **kwargs) # noqa: E501
else:
(data) = self.export_rule_chains_using_get_with_http_info(limit, **kwargs) # noqa: E501
return data
def export_rule_chains_using_get_with_http_info(self, limit, **kwargs): # noqa: E501
"""exportRuleChains # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.export_rule_chains_using_get_with_http_info(limit, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str limit: limit (required)
:return: RuleChainData
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['limit'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method export_rule_chains_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'limit' is set
if ('limit' not in params or
params['limit'] is None):
raise ValueError("Missing the required parameter `limit` when calling `export_rule_chains_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/ruleChains/export{?limit}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RuleChainData', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_auto_assign_to_edge_rule_chains_using_get(self, **kwargs): # noqa: E501
"""getAutoAssignToEdgeRuleChains # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_auto_assign_to_edge_rule_chains_using_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[RuleChain]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_auto_assign_to_edge_rule_chains_using_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_auto_assign_to_edge_rule_chains_using_get_with_http_info(**kwargs) # noqa: E501
return data
def get_auto_assign_to_edge_rule_chains_using_get_with_http_info(self, **kwargs): # noqa: E501
"""getAutoAssignToEdgeRuleChains # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_auto_assign_to_edge_rule_chains_using_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[RuleChain]
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_auto_assign_to_edge_rule_chains_using_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/ruleChain/autoAssignToEdgeRuleChains', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[RuleChain]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_edge_rule_chains_using_get(self, edge_id, page_size, page, **kwargs): # noqa: E501
"""getEdgeRuleChains # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_edge_rule_chains_using_get(edge_id, page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str edge_id: edgeId (required)
:param str page_size: pageSize (required)
:param str page: page (required)
:param str text_search: textSearch
:param str sort_property: sortProperty
:param str sort_order: sortOrder
:return: PageDataRuleChain
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_edge_rule_chains_using_get_with_http_info(edge_id, page_size, page, **kwargs) # noqa: E501
else:
(data) = self.get_edge_rule_chains_using_get_with_http_info(edge_id, page_size, page, **kwargs) # noqa: E501
return data
def get_edge_rule_chains_using_get_with_http_info(self, edge_id, page_size, page, **kwargs): # noqa: E501
"""getEdgeRuleChains # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_edge_rule_chains_using_get_with_http_info(edge_id, page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str edge_id: edgeId (required)
:param str page_size: pageSize (required)
:param str page: page (required)
:param str text_search: textSearch
:param str sort_property: sortProperty
:param str sort_order: sortOrder
:return: PageDataRuleChain
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['edge_id', 'page_size', 'page', 'text_search', 'sort_property', 'sort_order'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_edge_rule_chains_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'edge_id' is set
if ('edge_id' not in params or
params['edge_id'] is None):
raise ValueError("Missing the required parameter `edge_id` when calling `get_edge_rule_chains_using_get`") # noqa: E501
# verify the required parameter 'page_size' is set
if ('page_size' not in params or
params['page_size'] is None):
raise ValueError("Missing the required parameter `page_size` when calling `get_edge_rule_chains_using_get`") # noqa: E501
# verify the required parameter 'page' is set
if ('page' not in params or
params['page'] is None):
raise ValueError("Missing the required parameter `page` when calling `get_edge_rule_chains_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'edge_id' in params:
path_params['edgeId'] = params['edge_id'] # noqa: E501
query_params = []
if 'text_search' in params:
query_params.append(('textSearch', params['text_search'])) # noqa: E501
if 'sort_property' in params:
query_params.append(('sortProperty', params['sort_property'])) # noqa: E501
if 'sort_order' in params:
query_params.append(('sortOrder', params['sort_order'])) # noqa: E501
if 'page_size' in params:
query_params.append(('pageSize', params['page_size'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/edge/{edgeId}/ruleChains{?textSearch,sortProperty,sortOrder,pageSize,page}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageDataRuleChain', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_latest_rule_node_debug_input_using_get(self, rule_node_id, **kwargs): # noqa: E501
"""getLatestRuleNodeDebugInput # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_latest_rule_node_debug_input_using_get(rule_node_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str rule_node_id: ruleNodeId (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_latest_rule_node_debug_input_using_get_with_http_info(rule_node_id, **kwargs) # noqa: E501
else:
(data) = self.get_latest_rule_node_debug_input_using_get_with_http_info(rule_node_id, **kwargs) # noqa: E501
return data
def get_latest_rule_node_debug_input_using_get_with_http_info(self, rule_node_id, **kwargs): # noqa: E501
"""getLatestRuleNodeDebugInput # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_latest_rule_node_debug_input_using_get_with_http_info(rule_node_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str rule_node_id: ruleNodeId (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['rule_node_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_latest_rule_node_debug_input_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'rule_node_id' is set
if ('rule_node_id' not in params or
params['rule_node_id'] is None):
raise ValueError("Missing the required parameter `rule_node_id` when calling `get_latest_rule_node_debug_input_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'rule_node_id' in params:
path_params['ruleNodeId'] = params['rule_node_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/ruleNode/{ruleNodeId}/debugIn', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_rule_chain_by_id_using_get(self, rule_chain_id, **kwargs): # noqa: E501
"""getRuleChainById # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_rule_chain_by_id_using_get(rule_chain_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str rule_chain_id: ruleChainId (required)
:return: RuleChain
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_rule_chain_by_id_using_get_with_http_info(rule_chain_id, **kwargs) # noqa: E501
else:
(data) = self.get_rule_chain_by_id_using_get_with_http_info(rule_chain_id, **kwargs) # noqa: E501
return data
def get_rule_chain_by_id_using_get_with_http_info(self, rule_chain_id, **kwargs): # noqa: E501
"""getRuleChainById # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_rule_chain_by_id_using_get_with_http_info(rule_chain_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str rule_chain_id: ruleChainId (required)
:return: RuleChain
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['rule_chain_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_rule_chain_by_id_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'rule_chain_id' is set
if ('rule_chain_id' not in params or
params['rule_chain_id'] is None):
raise ValueError("Missing the required parameter `rule_chain_id` when calling `get_rule_chain_by_id_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'rule_chain_id' in params:
path_params['ruleChainId'] = params['rule_chain_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/ruleChain/{ruleChainId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RuleChain', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_rule_chain_meta_data_using_get(self, rule_chain_id, **kwargs): # noqa: E501
"""getRuleChainMetaData # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_rule_chain_meta_data_using_get(rule_chain_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str rule_chain_id: ruleChainId (required)
:return: RuleChainMetaData
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_rule_chain_meta_data_using_get_with_http_info(rule_chain_id, **kwargs) # noqa: E501
else:
(data) = self.get_rule_chain_meta_data_using_get_with_http_info(rule_chain_id, **kwargs) # noqa: E501
return data
def get_rule_chain_meta_data_using_get_with_http_info(self, rule_chain_id, **kwargs): # noqa: E501
"""getRuleChainMetaData # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_rule_chain_meta_data_using_get_with_http_info(rule_chain_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str rule_chain_id: ruleChainId (required)
:return: RuleChainMetaData
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['rule_chain_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_rule_chain_meta_data_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'rule_chain_id' is set
if ('rule_chain_id' not in params or
params['rule_chain_id'] is None):
raise ValueError("Missing the required parameter `rule_chain_id` when calling `get_rule_chain_meta_data_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'rule_chain_id' in params:
path_params['ruleChainId'] = params['rule_chain_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/ruleChain/{ruleChainId}/metadata', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RuleChainMetaData', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_rule_chains_using_get(self, page_size, page, **kwargs): # noqa: E501
"""getRuleChains # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_rule_chains_using_get(page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str page_size: pageSize (required)
:param str page: page (required)
:param str type: type
:param str text_search: textSearch
:param str sort_property: sortProperty
:param str sort_order: sortOrder
:return: PageDataRuleChain
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_rule_chains_using_get_with_http_info(page_size, page, **kwargs) # noqa: E501
else:
(data) = self.get_rule_chains_using_get_with_http_info(page_size, page, **kwargs) # noqa: E501
return data
def get_rule_chains_using_get_with_http_info(self, page_size, page, **kwargs): # noqa: E501
"""getRuleChains # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_rule_chains_using_get_with_http_info(page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str page_size: pageSize (required)
:param str page: page (required)
:param str type: type
:param str text_search: textSearch
:param str sort_property: sortProperty
:param str sort_order: sortOrder
:return: PageDataRuleChain
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page_size', 'page', 'type', 'text_search', 'sort_property', 'sort_order'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_rule_chains_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'page_size' is set
if ('page_size' not in params or
params['page_size'] is None):
raise ValueError("Missing the required parameter `page_size` when calling `get_rule_chains_using_get`") # noqa: E501
# verify the required parameter 'page' is set
if ('page' not in params or
params['page'] is None):
raise ValueError("Missing the required parameter `page` when calling `get_rule_chains_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'type' in params:
query_params.append(('type', params['type'])) # noqa: E501
if 'text_search' in params:
query_params.append(('textSearch', params['text_search'])) # noqa: E501
if 'sort_property' in params:
query_params.append(('sortProperty', params['sort_property'])) # noqa: E501
if 'sort_order' in params:
query_params.append(('sortOrder', params['sort_order'])) # noqa: E501
if 'page_size' in params:
query_params.append(('pageSize', params['page_size'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/ruleChains{?type,textSearch,sortProperty,sortOrder,pageSize,page}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageDataRuleChain', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
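# Usage sketch (illustrative comment): rule chains are listed page by page; note that
# page_size and page are passed as strings, matching the generated signature, and the
# result is a PageDataRuleChain whose `data` attribute is assumed to hold the RuleChain list.
#
#   page_data = api.get_rule_chains_using_get(page_size="10", page="0", sort_property="name")
#   for rule_chain in page_data.data:
#       print(rule_chain.name)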
def import_rule_chains_using_post(self, body, **kwargs): # noqa: E501
"""importRuleChains # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.import_rule_chains_using_post(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param RuleChainData body: ruleChainData (required)
:param bool overwrite: overwrite
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.import_rule_chains_using_post_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.import_rule_chains_using_post_with_http_info(body, **kwargs) # noqa: E501
return data
def import_rule_chains_using_post_with_http_info(self, body, **kwargs): # noqa: E501
"""importRuleChains # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.import_rule_chains_using_post_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param RuleChainData body: ruleChainData (required)
:param bool overwrite: overwrite
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'overwrite'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method import_rule_chains_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `import_rule_chains_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'overwrite' in params:
query_params.append(('overwrite', params['overwrite'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/ruleChains/import{?overwrite}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
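# Usage sketch (illustrative comment): export and import can be combined as a round trip;
# `limit` is a string in the generated signature and `overwrite` is an optional bool.
#
#   exported = api.export_rule_chains_using_get(limit="100")        # -> RuleChainData
#   api.import_rule_chains_using_post(body=exported, overwrite=True)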
def save_rule_chain_meta_data_using_post(self, body, **kwargs): # noqa: E501
"""saveRuleChainMetaData # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.save_rule_chain_meta_data_using_post(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param RuleChainMetaData body: ruleChainMetaData (required)
:return: RuleChainMetaData
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.save_rule_chain_meta_data_using_post_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.save_rule_chain_meta_data_using_post_with_http_info(body, **kwargs) # noqa: E501
return data
def save_rule_chain_meta_data_using_post_with_http_info(self, body, **kwargs): # noqa: E501
"""saveRuleChainMetaData # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.save_rule_chain_meta_data_using_post_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param RuleChainMetaData body: ruleChainMetaData (required)
:return: RuleChainMetaData
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method save_rule_chain_meta_data_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `save_rule_chain_meta_data_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/ruleChain/metadata', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RuleChainMetaData', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
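# Usage sketch (illustrative comment): rule chain metadata is read, modified and written back
# through the paired getter/setter; the returned object is a RuleChainMetaData model.
#
#   meta = api.get_rule_chain_meta_data_using_get(rule_chain_id="<rule-chain-uuid>")
#   # ... adjust the metadata (node/connection attribute names assumed) ...
#   api.save_rule_chain_meta_data_using_post(body=meta)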
def save_rule_chain_using_post(self, body, **kwargs): # noqa: E501
"""saveRuleChain # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.save_rule_chain_using_post(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param DefaultRuleChainCreateRequest body: request (required)
:return: RuleChain
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.save_rule_chain_using_post_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.save_rule_chain_using_post_with_http_info(body, **kwargs) # noqa: E501
return data
def save_rule_chain_using_post_with_http_info(self, body, **kwargs): # noqa: E501
"""saveRuleChain # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.save_rule_chain_using_post_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param DefaultRuleChainCreateRequest body: request (required)
:return: RuleChain
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method save_rule_chain_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `save_rule_chain_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/ruleChain/device/default', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RuleChain', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def save_rule_chain_using_post1(self, body, **kwargs): # noqa: E501
"""saveRuleChain # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.save_rule_chain_using_post1(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param RuleChain body: ruleChain (required)
:return: RuleChain
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.save_rule_chain_using_post1_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.save_rule_chain_using_post1_with_http_info(body, **kwargs) # noqa: E501
return data
def save_rule_chain_using_post1_with_http_info(self, body, **kwargs): # noqa: E501
"""saveRuleChain # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.save_rule_chain_using_post1_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param RuleChain body: ruleChain (required)
:return: RuleChain
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method save_rule_chain_using_post1" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `save_rule_chain_using_post1`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/ruleChain', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RuleChain', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def set_auto_assign_to_edge_rule_chain_using_post(self, rule_chain_id, **kwargs): # noqa: E501
"""setAutoAssignToEdgeRuleChain # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.set_auto_assign_to_edge_rule_chain_using_post(rule_chain_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str rule_chain_id: ruleChainId (required)
:return: RuleChain
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.set_auto_assign_to_edge_rule_chain_using_post_with_http_info(rule_chain_id, **kwargs) # noqa: E501
else:
(data) = self.set_auto_assign_to_edge_rule_chain_using_post_with_http_info(rule_chain_id, **kwargs) # noqa: E501
return data
def set_auto_assign_to_edge_rule_chain_using_post_with_http_info(self, rule_chain_id, **kwargs): # noqa: E501
"""setAutoAssignToEdgeRuleChain # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.set_auto_assign_to_edge_rule_chain_using_post_with_http_info(rule_chain_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str rule_chain_id: ruleChainId (required)
:return: RuleChain
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['rule_chain_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method set_auto_assign_to_edge_rule_chain_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'rule_chain_id' is set
if ('rule_chain_id' not in params or
params['rule_chain_id'] is None):
raise ValueError("Missing the required parameter `rule_chain_id` when calling `set_auto_assign_to_edge_rule_chain_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
if 'rule_chain_id' in params:
path_params['ruleChainId'] = params['rule_chain_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/ruleChain/{ruleChainId}/autoAssignToEdge', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RuleChain', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def set_edge_template_root_rule_chain_using_post(self, rule_chain_id, **kwargs): # noqa: E501
"""setEdgeTemplateRootRuleChain # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.set_edge_template_root_rule_chain_using_post(rule_chain_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str rule_chain_id: ruleChainId (required)
:return: RuleChain
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.set_edge_template_root_rule_chain_using_post_with_http_info(rule_chain_id, **kwargs) # noqa: E501
else:
(data) = self.set_edge_template_root_rule_chain_using_post_with_http_info(rule_chain_id, **kwargs) # noqa: E501
return data
def set_edge_template_root_rule_chain_using_post_with_http_info(self, rule_chain_id, **kwargs): # noqa: E501
"""setEdgeTemplateRootRuleChain # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.set_edge_template_root_rule_chain_using_post_with_http_info(rule_chain_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str rule_chain_id: ruleChainId (required)
:return: RuleChain
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['rule_chain_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method set_edge_template_root_rule_chain_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'rule_chain_id' is set
if ('rule_chain_id' not in params or
params['rule_chain_id'] is None):
raise ValueError("Missing the required parameter `rule_chain_id` when calling `set_edge_template_root_rule_chain_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
if 'rule_chain_id' in params:
path_params['ruleChainId'] = params['rule_chain_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/ruleChain/{ruleChainId}/edgeTemplateRoot', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RuleChain', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def set_root_rule_chain_using_post1(self, rule_chain_id, **kwargs): # noqa: E501
"""setRootRuleChain # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.set_root_rule_chain_using_post1(rule_chain_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str rule_chain_id: ruleChainId (required)
:return: RuleChain
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.set_root_rule_chain_using_post1_with_http_info(rule_chain_id, **kwargs) # noqa: E501
else:
(data) = self.set_root_rule_chain_using_post1_with_http_info(rule_chain_id, **kwargs) # noqa: E501
return data
def set_root_rule_chain_using_post1_with_http_info(self, rule_chain_id, **kwargs): # noqa: E501
"""setRootRuleChain # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.set_root_rule_chain_using_post1_with_http_info(rule_chain_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str rule_chain_id: ruleChainId (required)
:return: RuleChain
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['rule_chain_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method set_root_rule_chain_using_post1" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'rule_chain_id' is set
if ('rule_chain_id' not in params or
params['rule_chain_id'] is None):
raise ValueError("Missing the required parameter `rule_chain_id` when calling `set_root_rule_chain_using_post1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'rule_chain_id' in params:
path_params['ruleChainId'] = params['rule_chain_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/ruleChain/{ruleChainId}/root', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RuleChain', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def test_script_using_post(self, body, **kwargs): # noqa: E501
"""testScript # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.test_script_using_post(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str body: inputParams (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.test_script_using_post_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.test_script_using_post_with_http_info(body, **kwargs) # noqa: E501
return data
def test_script_using_post_with_http_info(self, body, **kwargs): # noqa: E501
"""testScript # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.test_script_using_post_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str body: inputParams (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method test_script_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `test_script_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/ruleChain/testScript', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
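# Usage sketch (illustrative comment): like every generated method, testScript can run
# asynchronously by passing async_req=True, which returns a thread-like handle whose get()
# blocks until the response (a str) is available.
#
#   thread = api.test_script_using_post(body=input_params_json, async_req=True)
#   result = thread.get()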
def unassign_rule_chain_from_edge_using_delete(self, edge_id, rule_chain_id, **kwargs): # noqa: E501
"""unassignRuleChainFromEdge # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.unassign_rule_chain_from_edge_using_delete(edge_id, rule_chain_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str edge_id: edgeId (required)
:param str rule_chain_id: ruleChainId (required)
:return: RuleChain
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.unassign_rule_chain_from_edge_using_delete_with_http_info(edge_id, rule_chain_id, **kwargs) # noqa: E501
else:
(data) = self.unassign_rule_chain_from_edge_using_delete_with_http_info(edge_id, rule_chain_id, **kwargs) # noqa: E501
return data
def unassign_rule_chain_from_edge_using_delete_with_http_info(self, edge_id, rule_chain_id, **kwargs): # noqa: E501
"""unassignRuleChainFromEdge # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.unassign_rule_chain_from_edge_using_delete_with_http_info(edge_id, rule_chain_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str edge_id: edgeId (required)
:param str rule_chain_id: ruleChainId (required)
:return: RuleChain
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['edge_id', 'rule_chain_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method unassign_rule_chain_from_edge_using_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'edge_id' is set
if ('edge_id' not in params or
params['edge_id'] is None):
raise ValueError("Missing the required parameter `edge_id` when calling `unassign_rule_chain_from_edge_using_delete`") # noqa: E501
# verify the required parameter 'rule_chain_id' is set
if ('rule_chain_id' not in params or
params['rule_chain_id'] is None):
raise ValueError("Missing the required parameter `rule_chain_id` when calling `unassign_rule_chain_from_edge_using_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'edge_id' in params:
path_params['edgeId'] = params['edge_id'] # noqa: E501
if 'rule_chain_id' in params:
path_params['ruleChainId'] = params['rule_chain_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/edge/{edgeId}/ruleChain/{ruleChainId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RuleChain', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
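# Usage sketch (illustrative comment): assignment to an edge is symmetric — POST assigns,
# DELETE unassigns — and both calls return the affected RuleChain.
#
#   api.assign_rule_chain_to_edge_using_post(edge_id="<edge-uuid>", rule_chain_id="<rc-uuid>")
#   api.unassign_rule_chain_from_edge_using_delete(edge_id="<edge-uuid>", rule_chain_id="<rc-uuid>")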
def unset_auto_assign_to_edge_rule_chain_using_delete(self, rule_chain_id, **kwargs): # noqa: E501
"""unsetAutoAssignToEdgeRuleChain # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.unset_auto_assign_to_edge_rule_chain_using_delete(rule_chain_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str rule_chain_id: ruleChainId (required)
:return: RuleChain
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.unset_auto_assign_to_edge_rule_chain_using_delete_with_http_info(rule_chain_id, **kwargs) # noqa: E501
else:
(data) = self.unset_auto_assign_to_edge_rule_chain_using_delete_with_http_info(rule_chain_id, **kwargs) # noqa: E501
return data
def unset_auto_assign_to_edge_rule_chain_using_delete_with_http_info(self, rule_chain_id, **kwargs): # noqa: E501
"""unsetAutoAssignToEdgeRuleChain # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.unset_auto_assign_to_edge_rule_chain_using_delete_with_http_info(rule_chain_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str rule_chain_id: ruleChainId (required)
:return: RuleChain
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['rule_chain_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method unset_auto_assign_to_edge_rule_chain_using_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'rule_chain_id' is set
if ('rule_chain_id' not in params or
params['rule_chain_id'] is None):
raise ValueError("Missing the required parameter `rule_chain_id` when calling `unset_auto_assign_to_edge_rule_chain_using_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'rule_chain_id' in params:
path_params['ruleChainId'] = params['rule_chain_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/ruleChain/{ruleChainId}/autoAssignToEdge', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RuleChain', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
```
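The generated `RuleChainControllerApi` above is normally driven through an authenticated `ApiClient`. The sketch below is illustrative only: the controller import path, host URL, and JWT handling are assumptions, since the higher-level REST client wrappers in `tb_rest_client` usually take care of login and the `X-Authorization` header.
```python
# Minimal usage sketch for RuleChainControllerApi (assumptions: import path, host, token).
from tb_rest_client.api_client import ApiClient  # import shown in this repository
from tb_rest_client.api.api_pe import RuleChainControllerApi  # assumed re-export path


def list_rule_chain_names(jwt_token, host="https://thingsboard.example.com"):
    """Return the names on the first page of rule chains (placeholder host/token)."""
    api_client = ApiClient()
    api_client.configuration.host = host
    # The 'X-Authorization' auth setting used by the generated methods maps to this header;
    # the 'Bearer ' prefix follows the usual ThingsBoard convention.
    api_client.set_default_header("X-Authorization", "Bearer " + jwt_token)

    api = RuleChainControllerApi(api_client)
    # page_size and page are string-typed in the generated signatures
    page_data = api.get_rule_chains_using_get(page_size="10", page="0")
    return [rc.name for rc in page_data.data]
```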
#### File: api/api_pe/entity_view_controller_api.py
```python
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from tb_rest_client.api_client import ApiClient
class EntityViewControllerApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def delete_entity_view_using_delete(self, entity_view_id, **kwargs): # noqa: E501
"""deleteEntityView # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_entity_view_using_delete(entity_view_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_view_id: entityViewId (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_entity_view_using_delete_with_http_info(entity_view_id, **kwargs) # noqa: E501
else:
(data) = self.delete_entity_view_using_delete_with_http_info(entity_view_id, **kwargs) # noqa: E501
return data
def delete_entity_view_using_delete_with_http_info(self, entity_view_id, **kwargs): # noqa: E501
"""deleteEntityView # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_entity_view_using_delete_with_http_info(entity_view_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_view_id: entityViewId (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['entity_view_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_entity_view_using_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'entity_view_id' is set
if ('entity_view_id' not in params or
params['entity_view_id'] is None):
raise ValueError("Missing the required parameter `entity_view_id` when calling `delete_entity_view_using_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'entity_view_id' in params:
path_params['entityViewId'] = params['entity_view_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/entityView/{entityViewId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def find_by_query_using_post4(self, body, **kwargs): # noqa: E501
"""findByQuery # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.find_by_query_using_post4(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param EntityViewSearchQuery body: query (required)
:return: list[EntityView]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.find_by_query_using_post4_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.find_by_query_using_post4_with_http_info(body, **kwargs) # noqa: E501
return data
def find_by_query_using_post4_with_http_info(self, body, **kwargs): # noqa: E501
"""findByQuery # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.find_by_query_using_post4_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param EntityViewSearchQuery body: query (required)
:return: list[EntityView]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method find_by_query_using_post4" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `find_by_query_using_post4`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/entityViews', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[EntityView]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_customer_entity_views_using_get(self, customer_id, page_size, page, **kwargs): # noqa: E501
"""getCustomerEntityViews # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_customer_entity_views_using_get(customer_id, page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str customer_id: customerId (required)
:param str page_size: pageSize (required)
:param str page: page (required)
:param str type: type
:param str text_search: textSearch
:param str sort_property: sortProperty
:param str sort_order: sortOrder
:return: PageDataEntityView
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_customer_entity_views_using_get_with_http_info(customer_id, page_size, page, **kwargs) # noqa: E501
else:
(data) = self.get_customer_entity_views_using_get_with_http_info(customer_id, page_size, page, **kwargs) # noqa: E501
return data
def get_customer_entity_views_using_get_with_http_info(self, customer_id, page_size, page, **kwargs): # noqa: E501
"""getCustomerEntityViews # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_customer_entity_views_using_get_with_http_info(customer_id, page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str customer_id: customerId (required)
:param str page_size: pageSize (required)
:param str page: page (required)
:param str type: type
:param str text_search: textSearch
:param str sort_property: sortProperty
:param str sort_order: sortOrder
:return: PageDataEntityView
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['customer_id', 'page_size', 'page', 'type', 'text_search', 'sort_property', 'sort_order'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_customer_entity_views_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'customer_id' is set
if ('customer_id' not in params or
params['customer_id'] is None):
raise ValueError("Missing the required parameter `customer_id` when calling `get_customer_entity_views_using_get`") # noqa: E501
# verify the required parameter 'page_size' is set
if ('page_size' not in params or
params['page_size'] is None):
raise ValueError("Missing the required parameter `page_size` when calling `get_customer_entity_views_using_get`") # noqa: E501
# verify the required parameter 'page' is set
if ('page' not in params or
params['page'] is None):
raise ValueError("Missing the required parameter `page` when calling `get_customer_entity_views_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'customer_id' in params:
path_params['customerId'] = params['customer_id'] # noqa: E501
query_params = []
if 'type' in params:
query_params.append(('type', params['type'])) # noqa: E501
if 'text_search' in params:
query_params.append(('textSearch', params['text_search'])) # noqa: E501
if 'sort_property' in params:
query_params.append(('sortProperty', params['sort_property'])) # noqa: E501
if 'sort_order' in params:
query_params.append(('sortOrder', params['sort_order'])) # noqa: E501
if 'page_size' in params:
query_params.append(('pageSize', params['page_size'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/customer/{customerId}/entityViews{?type,textSearch,sortProperty,sortOrder,pageSize,page}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageDataEntityView', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_entity_view_by_id_using_get(self, entity_view_id, **kwargs): # noqa: E501
"""getEntityViewById # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_view_by_id_using_get(entity_view_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_view_id: entityViewId (required)
:return: EntityView
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_entity_view_by_id_using_get_with_http_info(entity_view_id, **kwargs) # noqa: E501
else:
(data) = self.get_entity_view_by_id_using_get_with_http_info(entity_view_id, **kwargs) # noqa: E501
return data
def get_entity_view_by_id_using_get_with_http_info(self, entity_view_id, **kwargs): # noqa: E501
"""getEntityViewById # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_view_by_id_using_get_with_http_info(entity_view_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_view_id: entityViewId (required)
:return: EntityView
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['entity_view_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_entity_view_by_id_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'entity_view_id' is set
if ('entity_view_id' not in params or
params['entity_view_id'] is None):
raise ValueError("Missing the required parameter `entity_view_id` when calling `get_entity_view_by_id_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'entity_view_id' in params:
path_params['entityViewId'] = params['entity_view_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/entityView/{entityViewId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='EntityView', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_entity_view_types_using_get(self, **kwargs): # noqa: E501
"""getEntityViewTypes # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_view_types_using_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[EntitySubtype]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_entity_view_types_using_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_entity_view_types_using_get_with_http_info(**kwargs) # noqa: E501
return data
def get_entity_view_types_using_get_with_http_info(self, **kwargs): # noqa: E501
"""getEntityViewTypes # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_view_types_using_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[EntitySubtype]
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_entity_view_types_using_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/entityView/types', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[EntitySubtype]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_entity_views_by_entity_group_id_using_get(self, entity_group_id, page_size, page, **kwargs): # noqa: E501
"""getEntityViewsByEntityGroupId # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_views_by_entity_group_id_using_get(entity_group_id, page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: entityGroupId (required)
:param str page_size: Page size (required)
:param str page: Page (required)
:param str text_search: textSearch
:param str sort_property: sortProperty
:param str sort_order: sortOrder
:return: PageDataEntityView
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_entity_views_by_entity_group_id_using_get_with_http_info(entity_group_id, page_size, page, **kwargs) # noqa: E501
else:
(data) = self.get_entity_views_by_entity_group_id_using_get_with_http_info(entity_group_id, page_size, page, **kwargs) # noqa: E501
return data
def get_entity_views_by_entity_group_id_using_get_with_http_info(self, entity_group_id, page_size, page, **kwargs): # noqa: E501
"""getEntityViewsByEntityGroupId # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_views_by_entity_group_id_using_get_with_http_info(entity_group_id, page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: entityGroupId (required)
:param str page_size: Page size (required)
:param str page: Page (required)
:param str text_search: textSearch
:param str sort_property: sortProperty
:param str sort_order: sortOrder
:return: PageDataEntityView
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['entity_group_id', 'page_size', 'page', 'text_search', 'sort_property', 'sort_order'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_entity_views_by_entity_group_id_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'entity_group_id' is set
if ('entity_group_id' not in params or
params['entity_group_id'] is None):
raise ValueError("Missing the required parameter `entity_group_id` when calling `get_entity_views_by_entity_group_id_using_get`") # noqa: E501
# verify the required parameter 'page_size' is set
if ('page_size' not in params or
params['page_size'] is None):
raise ValueError("Missing the required parameter `page_size` when calling `get_entity_views_by_entity_group_id_using_get`") # noqa: E501
# verify the required parameter 'page' is set
if ('page' not in params or
params['page'] is None):
raise ValueError("Missing the required parameter `page` when calling `get_entity_views_by_entity_group_id_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'entity_group_id' in params:
path_params['entityGroupId'] = params['entity_group_id'] # noqa: E501
query_params = []
if 'text_search' in params:
query_params.append(('textSearch', params['text_search'])) # noqa: E501
if 'sort_property' in params:
query_params.append(('sortProperty', params['sort_property'])) # noqa: E501
if 'sort_order' in params:
query_params.append(('sortOrder', params['sort_order'])) # noqa: E501
if 'page_size' in params:
query_params.append(('pageSize', params['page_size'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/entityGroup/{entityGroupId}/entityViews{?textSearch,sortProperty,sortOrder,pageSize,page}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageDataEntityView', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_entity_views_by_ids_using_get(self, entity_view_ids, **kwargs): # noqa: E501
"""getEntityViewsByIds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_views_by_ids_using_get(entity_view_ids, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_view_ids: entityViewIds (required)
:return: list[EntityView]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_entity_views_by_ids_using_get_with_http_info(entity_view_ids, **kwargs) # noqa: E501
else:
(data) = self.get_entity_views_by_ids_using_get_with_http_info(entity_view_ids, **kwargs) # noqa: E501
return data
def get_entity_views_by_ids_using_get_with_http_info(self, entity_view_ids, **kwargs): # noqa: E501
"""getEntityViewsByIds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_views_by_ids_using_get_with_http_info(entity_view_ids, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_view_ids: entityViewIds (required)
:return: list[EntityView]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['entity_view_ids'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_entity_views_by_ids_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'entity_view_ids' is set
if ('entity_view_ids' not in params or
params['entity_view_ids'] is None):
raise ValueError("Missing the required parameter `entity_view_ids` when calling `get_entity_views_by_ids_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'entity_view_ids' in params:
query_params.append(('entityViewIds', params['entity_view_ids'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/entityViews{?entityViewIds}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[EntityView]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_tenant_entity_view_using_get(self, entity_view_name, **kwargs): # noqa: E501
"""getTenantEntityView # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tenant_entity_view_using_get(entity_view_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_view_name: entityViewName (required)
:return: EntityView
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_tenant_entity_view_using_get_with_http_info(entity_view_name, **kwargs) # noqa: E501
else:
(data) = self.get_tenant_entity_view_using_get_with_http_info(entity_view_name, **kwargs) # noqa: E501
return data
def get_tenant_entity_view_using_get_with_http_info(self, entity_view_name, **kwargs): # noqa: E501
"""getTenantEntityView # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tenant_entity_view_using_get_with_http_info(entity_view_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_view_name: entityViewName (required)
:return: EntityView
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['entity_view_name'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_tenant_entity_view_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'entity_view_name' is set
if ('entity_view_name' not in params or
params['entity_view_name'] is None):
raise ValueError("Missing the required parameter `entity_view_name` when calling `get_tenant_entity_view_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'entity_view_name' in params:
query_params.append(('entityViewName', params['entity_view_name'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/tenant/entityViews{?entityViewName}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='EntityView', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_tenant_entity_views_using_get(self, page_size, page, **kwargs): # noqa: E501
"""getTenantEntityViews # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tenant_entity_views_using_get(page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str page_size: pageSize (required)
:param str page: page (required)
:param str type: type
:param str text_search: textSearch
:param str sort_property: sortProperty
:param str sort_order: sortOrder
:return: PageDataEntityView
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_tenant_entity_views_using_get_with_http_info(page_size, page, **kwargs) # noqa: E501
else:
(data) = self.get_tenant_entity_views_using_get_with_http_info(page_size, page, **kwargs) # noqa: E501
return data
def get_tenant_entity_views_using_get_with_http_info(self, page_size, page, **kwargs): # noqa: E501
"""getTenantEntityViews # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tenant_entity_views_using_get_with_http_info(page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str page_size: pageSize (required)
:param str page: page (required)
:param str type: type
:param str text_search: textSearch
:param str sort_property: sortProperty
:param str sort_order: sortOrder
:return: PageDataEntityView
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page_size', 'page', 'type', 'text_search', 'sort_property', 'sort_order'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_tenant_entity_views_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'page_size' is set
if ('page_size' not in params or
params['page_size'] is None):
raise ValueError("Missing the required parameter `page_size` when calling `get_tenant_entity_views_using_get`") # noqa: E501
# verify the required parameter 'page' is set
if ('page' not in params or
params['page'] is None):
raise ValueError("Missing the required parameter `page` when calling `get_tenant_entity_views_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'type' in params:
query_params.append(('type', params['type'])) # noqa: E501
if 'text_search' in params:
query_params.append(('textSearch', params['text_search'])) # noqa: E501
if 'sort_property' in params:
query_params.append(('sortProperty', params['sort_property'])) # noqa: E501
if 'sort_order' in params:
query_params.append(('sortOrder', params['sort_order'])) # noqa: E501
if 'page_size' in params:
query_params.append(('pageSize', params['page_size'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/tenant/entityViews{?type,textSearch,sortProperty,sortOrder,pageSize,page}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageDataEntityView', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_user_entity_views_using_get(self, page_size, page, **kwargs): # noqa: E501
"""getUserEntityViews # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_user_entity_views_using_get(page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str page_size: pageSize (required)
:param str page: page (required)
:param str type: type
:param str text_search: textSearch
:param str sort_property: sortProperty
:param str sort_order: sortOrder
:return: PageDataEntityView
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_user_entity_views_using_get_with_http_info(page_size, page, **kwargs) # noqa: E501
else:
(data) = self.get_user_entity_views_using_get_with_http_info(page_size, page, **kwargs) # noqa: E501
return data
def get_user_entity_views_using_get_with_http_info(self, page_size, page, **kwargs): # noqa: E501
"""getUserEntityViews # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_user_entity_views_using_get_with_http_info(page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str page_size: pageSize (required)
:param str page: page (required)
:param str type: type
:param str text_search: textSearch
:param str sort_property: sortProperty
:param str sort_order: sortOrder
:return: PageDataEntityView
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page_size', 'page', 'type', 'text_search', 'sort_property', 'sort_order'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_user_entity_views_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'page_size' is set
if ('page_size' not in params or
params['page_size'] is None):
raise ValueError("Missing the required parameter `page_size` when calling `get_user_entity_views_using_get`") # noqa: E501
# verify the required parameter 'page' is set
if ('page' not in params or
params['page'] is None):
raise ValueError("Missing the required parameter `page` when calling `get_user_entity_views_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'type' in params:
query_params.append(('type', params['type'])) # noqa: E501
if 'text_search' in params:
query_params.append(('textSearch', params['text_search'])) # noqa: E501
if 'sort_property' in params:
query_params.append(('sortProperty', params['sort_property'])) # noqa: E501
if 'sort_order' in params:
query_params.append(('sortOrder', params['sort_order'])) # noqa: E501
if 'page_size' in params:
query_params.append(('pageSize', params['page_size'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/user/entityViews{?type,textSearch,sortProperty,sortOrder,pageSize,page}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageDataEntityView', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def save_entity_view_using_post(self, body, **kwargs): # noqa: E501
"""saveEntityView # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.save_entity_view_using_post(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param EntityView body: entityView (required)
:param str entity_group_id: entityGroupId
:return: EntityView
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.save_entity_view_using_post_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.save_entity_view_using_post_with_http_info(body, **kwargs) # noqa: E501
return data
def save_entity_view_using_post_with_http_info(self, body, **kwargs): # noqa: E501
"""saveEntityView # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.save_entity_view_using_post_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param EntityView body: entityView (required)
:param str entity_group_id: entityGroupId
:return: EntityView
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'entity_group_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method save_entity_view_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `save_entity_view_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'entity_group_id' in params:
query_params.append(('entityGroupId', params['entity_group_id'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/entityView{?entityGroupId}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='EntityView', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
```
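The controller above follows the usual swagger-codegen calling convention: each operation has a plain variant that returns the deserialized body and a `_with_http_info` variant, and passing `async_req=True` turns the call into a thread whose `get()` yields the result. Below is a minimal usage sketch, assuming the standard swagger-codegen `ApiClient`/`Configuration` wiring and a JWT obtained from ThingsBoard's login endpoint; the host, token, and import paths are placeholders inferred from the file layout, and in practice the package's higher-level REST client wrappers handle authentication for you.
```python
from tb_rest_client.api_client import ApiClient
from tb_rest_client.configuration import Configuration  # assumed import path
from tb_rest_client.api.api_pe.entity_view_controller_api import EntityViewControllerApi  # assumed import path

config = Configuration()
config.host = "https://thingsboard.example.com"  # hypothetical instance URL

# The X-Authorization header carries the JWT returned by the login endpoint.
client = ApiClient(config,
                   header_name="X-Authorization",
                   header_value="Bearer <JWT token>")  # placeholder token
api = EntityViewControllerApi(client)

# Synchronous call: returns a PageDataEntityView directly.
page = api.get_tenant_entity_views_using_get(page_size="10", page="0")
print(page)

# Asynchronous call: returns a thread; retrieve the result with get().
thread = api.get_entity_view_types_using_get(async_req=True)
print(thread.get())
```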
#### File: models/models_ce/entity_data_page_link.py
```python
import pprint
import re # noqa: F401
import six
class EntityDataPageLink(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'page_size': 'int',
'page': 'int',
'text_search': 'str',
'sort_order': 'EntityDataSortOrder',
'dynamic': 'bool'
}
attribute_map = {
'page_size': 'pageSize',
'page': 'page',
'text_search': 'textSearch',
'sort_order': 'sortOrder',
'dynamic': 'dynamic'
}
def __init__(self, page_size=None, page=None, text_search=None, sort_order=None, dynamic=None): # noqa: E501
"""EntityDataPageLink - a model defined in Swagger""" # noqa: E501
self._page_size = None
self._page = None
self._text_search = None
self._sort_order = None
self._dynamic = None
self.discriminator = None
self.page_size = page_size
self.page = page
self.text_search = text_search
self.sort_order = sort_order
self.dynamic = dynamic
@property
def page_size(self):
"""Gets the page_size of this EntityDataPageLink. # noqa: E501
:return: The page_size of this EntityDataPageLink. # noqa: E501
:rtype: int
"""
return self._page_size
@page_size.setter
def page_size(self, page_size):
"""Sets the page_size of this EntityDataPageLink.
:param page_size: The page_size of this EntityDataPageLink. # noqa: E501
:type: int
"""
if page_size is None:
raise ValueError("Invalid value for `page_size`, must not be `None`") # noqa: E501
self._page_size = page_size
@property
def page(self):
"""Gets the page of this EntityDataPageLink. # noqa: E501
:return: The page of this EntityDataPageLink. # noqa: E501
:rtype: int
"""
return self._page
@page.setter
def page(self, page):
"""Sets the page of this EntityDataPageLink.
:param page: The page of this EntityDataPageLink. # noqa: E501
:type: int
"""
if page is None:
raise ValueError("Invalid value for `page`, must not be `None`") # noqa: E501
self._page = page
@property
def text_search(self):
"""Gets the text_search of this EntityDataPageLink. # noqa: E501
:return: The text_search of this EntityDataPageLink. # noqa: E501
:rtype: str
"""
return self._text_search
@text_search.setter
def text_search(self, text_search):
"""Sets the text_search of this EntityDataPageLink.
:param text_search: The text_search of this EntityDataPageLink. # noqa: E501
:type: str
"""
if text_search is None:
raise ValueError("Invalid value for `text_search`, must not be `None`") # noqa: E501
self._text_search = text_search
@property
def sort_order(self):
"""Gets the sort_order of this EntityDataPageLink. # noqa: E501
:return: The sort_order of this EntityDataPageLink. # noqa: E501
:rtype: EntityDataSortOrder
"""
return self._sort_order
@sort_order.setter
def sort_order(self, sort_order):
"""Sets the sort_order of this EntityDataPageLink.
:param sort_order: The sort_order of this EntityDataPageLink. # noqa: E501
:type: EntityDataSortOrder
"""
if sort_order is None:
raise ValueError("Invalid value for `sort_order`, must not be `None`") # noqa: E501
self._sort_order = sort_order
@property
def dynamic(self):
"""Gets the dynamic of this EntityDataPageLink. # noqa: E501
:return: The dynamic of this EntityDataPageLink. # noqa: E501
:rtype: bool
"""
return self._dynamic
@dynamic.setter
def dynamic(self, dynamic):
"""Sets the dynamic of this EntityDataPageLink.
:param dynamic: The dynamic of this EntityDataPageLink. # noqa: E501
:type: bool
"""
if dynamic is None:
raise ValueError("Invalid value for `dynamic`, must not be `None`") # noqa: E501
self._dynamic = dynamic
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(EntityDataPageLink, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, EntityDataPageLink):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
```
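Unlike most generated models, `EntityDataPageLink` treats every constructor argument as mandatory: each setter rejects `None`. A small sketch of that validation, with the import path assumed from the file layout above:
```python
from tb_rest_client.models.models_ce import EntityDataPageLink  # assumed import path

# Every field is required; the setter for a missing field raises ValueError.
try:
    EntityDataPageLink(page_size=10, page=0, text_search="",
                       sort_order=None, dynamic=False)
except ValueError as err:
    print(err)  # Invalid value for `sort_order`, must not be `None`
```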
#### File: models/models_pe/allowed_permissions_info.py
```python
import pprint
import re # noqa: F401
import six
class AllowedPermissionsInfo(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'allowed_for_group_owner_only_group_operations': 'list[str]',
'allowed_for_group_owner_only_operations': 'list[str]',
'allowed_for_group_role_operations': 'list[str]',
'allowed_resources': 'list[str]',
'operations_by_resource': 'dict(str, list[str])',
'user_owner_id': 'EntityId',
'user_permissions': 'MergedUserPermissions'
}
attribute_map = {
'allowed_for_group_owner_only_group_operations': 'allowedForGroupOwnerOnlyGroupOperations',
'allowed_for_group_owner_only_operations': 'allowedForGroupOwnerOnlyOperations',
'allowed_for_group_role_operations': 'allowedForGroupRoleOperations',
'allowed_resources': 'allowedResources',
'operations_by_resource': 'operationsByResource',
'user_owner_id': 'userOwnerId',
'user_permissions': 'userPermissions'
}
def __init__(self, allowed_for_group_owner_only_group_operations=None, allowed_for_group_owner_only_operations=None, allowed_for_group_role_operations=None, allowed_resources=None, operations_by_resource=None, user_owner_id=None, user_permissions=None): # noqa: E501
"""AllowedPermissionsInfo - a model defined in Swagger""" # noqa: E501
self._allowed_for_group_owner_only_group_operations = None
self._allowed_for_group_owner_only_operations = None
self._allowed_for_group_role_operations = None
self._allowed_resources = None
self._operations_by_resource = None
self._user_owner_id = None
self._user_permissions = None
self.discriminator = None
if allowed_for_group_owner_only_group_operations is not None:
self.allowed_for_group_owner_only_group_operations = allowed_for_group_owner_only_group_operations
if allowed_for_group_owner_only_operations is not None:
self.allowed_for_group_owner_only_operations = allowed_for_group_owner_only_operations
if allowed_for_group_role_operations is not None:
self.allowed_for_group_role_operations = allowed_for_group_role_operations
if allowed_resources is not None:
self.allowed_resources = allowed_resources
if operations_by_resource is not None:
self.operations_by_resource = operations_by_resource
if user_owner_id is not None:
self.user_owner_id = user_owner_id
if user_permissions is not None:
self.user_permissions = user_permissions
@property
def allowed_for_group_owner_only_group_operations(self):
"""Gets the allowed_for_group_owner_only_group_operations of this AllowedPermissionsInfo. # noqa: E501
:return: The allowed_for_group_owner_only_group_operations of this AllowedPermissionsInfo. # noqa: E501
:rtype: list[str]
"""
return self._allowed_for_group_owner_only_group_operations
@allowed_for_group_owner_only_group_operations.setter
def allowed_for_group_owner_only_group_operations(self, allowed_for_group_owner_only_group_operations):
"""Sets the allowed_for_group_owner_only_group_operations of this AllowedPermissionsInfo.
:param allowed_for_group_owner_only_group_operations: The allowed_for_group_owner_only_group_operations of this AllowedPermissionsInfo. # noqa: E501
:type: list[str]
"""
allowed_values = ["ALL", "CREATE", "READ", "WRITE", "DELETE", "RPC_CALL", "READ_CREDENTIALS", "WRITE_CREDENTIALS", "READ_ATTRIBUTES", "WRITE_ATTRIBUTES", "READ_TELEMETRY", "WRITE_TELEMETRY", "ADD_TO_GROUP", "REMOVE_FROM_GROUP", "CHANGE_OWNER", "IMPERSONATE", "CLAIM_DEVICES", "SHARE_GROUP", "ASSIGN_TO_TENANT"] # noqa: E501
if not set(allowed_for_group_owner_only_group_operations).issubset(set(allowed_values)):
raise ValueError(
"Invalid values for `allowed_for_group_owner_only_group_operations` [{0}], must be a subset of [{1}]" # noqa: E501
.format(", ".join(map(str, set(allowed_for_group_owner_only_group_operations) - set(allowed_values))), # noqa: E501
", ".join(map(str, allowed_values)))
)
self._allowed_for_group_owner_only_group_operations = allowed_for_group_owner_only_group_operations
@property
def allowed_for_group_owner_only_operations(self):
"""Gets the allowed_for_group_owner_only_operations of this AllowedPermissionsInfo. # noqa: E501
:return: The allowed_for_group_owner_only_operations of this AllowedPermissionsInfo. # noqa: E501
:rtype: list[str]
"""
return self._allowed_for_group_owner_only_operations
@allowed_for_group_owner_only_operations.setter
def allowed_for_group_owner_only_operations(self, allowed_for_group_owner_only_operations):
"""Sets the allowed_for_group_owner_only_operations of this AllowedPermissionsInfo.
:param allowed_for_group_owner_only_operations: The allowed_for_group_owner_only_operations of this AllowedPermissionsInfo. # noqa: E501
:type: list[str]
"""
allowed_values = ["ALL", "CREATE", "READ", "WRITE", "DELETE", "RPC_CALL", "READ_CREDENTIALS", "WRITE_CREDENTIALS", "READ_ATTRIBUTES", "WRITE_ATTRIBUTES", "READ_TELEMETRY", "WRITE_TELEMETRY", "ADD_TO_GROUP", "REMOVE_FROM_GROUP", "CHANGE_OWNER", "IMPERSONATE", "CLAIM_DEVICES", "SHARE_GROUP", "ASSIGN_TO_TENANT"] # noqa: E501
if not set(allowed_for_group_owner_only_operations).issubset(set(allowed_values)):
raise ValueError(
"Invalid values for `allowed_for_group_owner_only_operations` [{0}], must be a subset of [{1}]" # noqa: E501
.format(", ".join(map(str, set(allowed_for_group_owner_only_operations) - set(allowed_values))), # noqa: E501
", ".join(map(str, allowed_values)))
)
self._allowed_for_group_owner_only_operations = allowed_for_group_owner_only_operations
@property
def allowed_for_group_role_operations(self):
"""Gets the allowed_for_group_role_operations of this AllowedPermissionsInfo. # noqa: E501
:return: The allowed_for_group_role_operations of this AllowedPermissionsInfo. # noqa: E501
:rtype: list[str]
"""
return self._allowed_for_group_role_operations
@allowed_for_group_role_operations.setter
def allowed_for_group_role_operations(self, allowed_for_group_role_operations):
"""Sets the allowed_for_group_role_operations of this AllowedPermissionsInfo.
:param allowed_for_group_role_operations: The allowed_for_group_role_operations of this AllowedPermissionsInfo. # noqa: E501
:type: list[str]
"""
allowed_values = ["ALL", "CREATE", "READ", "WRITE", "DELETE", "RPC_CALL", "READ_CREDENTIALS", "WRITE_CREDENTIALS", "READ_ATTRIBUTES", "WRITE_ATTRIBUTES", "READ_TELEMETRY", "WRITE_TELEMETRY", "ADD_TO_GROUP", "REMOVE_FROM_GROUP", "CHANGE_OWNER", "IMPERSONATE", "CLAIM_DEVICES", "SHARE_GROUP", "ASSIGN_TO_TENANT"] # noqa: E501
if not set(allowed_for_group_role_operations).issubset(set(allowed_values)):
raise ValueError(
"Invalid values for `allowed_for_group_role_operations` [{0}], must be a subset of [{1}]" # noqa: E501
.format(", ".join(map(str, set(allowed_for_group_role_operations) - set(allowed_values))), # noqa: E501
", ".join(map(str, allowed_values)))
)
self._allowed_for_group_role_operations = allowed_for_group_role_operations
@property
def allowed_resources(self):
"""Gets the allowed_resources of this AllowedPermissionsInfo. # noqa: E501
:return: The allowed_resources of this AllowedPermissionsInfo. # noqa: E501
:rtype: list[str]
"""
return self._allowed_resources
@allowed_resources.setter
def allowed_resources(self, allowed_resources):
"""Sets the allowed_resources of this AllowedPermissionsInfo.
:param allowed_resources: The allowed_resources of this AllowedPermissionsInfo. # noqa: E501
:type: list[str]
"""
allowed_values = ["ALL", "PROFILE", "ADMIN_SETTINGS", "ALARM", "DEVICE", "ASSET", "CUSTOMER", "DASHBOARD", "ENTITY_VIEW", "EDGE", "TENANT", "RULE_CHAIN", "USER", "WIDGETS_BUNDLE", "WIDGET_TYPE", "OAUTH2_CONFIGURATION_INFO", "OAUTH2_CONFIGURATION_TEMPLATE", "TENANT_PROFILE", "DEVICE_PROFILE", "CONVERTER", "INTEGRATION", "SCHEDULER_EVENT", "BLOB_ENTITY", "CUSTOMER_GROUP", "DEVICE_GROUP", "ASSET_GROUP", "USER_GROUP", "ENTITY_VIEW_GROUP", "EDGE_GROUP", "DASHBOARD_GROUP", "ROLE", "GROUP_PERMISSION", "WHITE_LABELING", "AUDIT_LOG", "API_USAGE_STATE", "TB_RESOURCE", "OTA_PACKAGE"] # noqa: E501
if not set(allowed_resources).issubset(set(allowed_values)):
raise ValueError(
"Invalid values for `allowed_resources` [{0}], must be a subset of [{1}]" # noqa: E501
.format(", ".join(map(str, set(allowed_resources) - set(allowed_values))), # noqa: E501
", ".join(map(str, allowed_values)))
)
self._allowed_resources = allowed_resources
@property
def operations_by_resource(self):
"""Gets the operations_by_resource of this AllowedPermissionsInfo. # noqa: E501
:return: The operations_by_resource of this AllowedPermissionsInfo. # noqa: E501
:rtype: dict(str, list[str])
"""
return self._operations_by_resource
@operations_by_resource.setter
def operations_by_resource(self, operations_by_resource):
"""Sets the operations_by_resource of this AllowedPermissionsInfo.
:param operations_by_resource: The operations_by_resource of this AllowedPermissionsInfo. # noqa: E501
:type: dict(str, list[str])
"""
        allowed_values = ["ALL", "CREATE", "READ", "WRITE", "DELETE", "RPC_CALL", "READ_CREDENTIALS", "WRITE_CREDENTIALS", "READ_ATTRIBUTES", "WRITE_ATTRIBUTES", "READ_TELEMETRY", "WRITE_TELEMETRY", "ADD_TO_GROUP", "REMOVE_FROM_GROUP", "CHANGE_OWNER", "IMPERSONATE", "CLAIM_DEVICES", "SHARE_GROUP", "ASSIGN_TO_TENANT"]  # noqa: E501
if not set(operations_by_resource.keys()).issubset(set(allowed_values)):
raise ValueError(
"Invalid keys in `operations_by_resource` [{0}], must be a subset of [{1}]" # noqa: E501
.format(", ".join(map(str, set(operations_by_resource.keys()) - set(allowed_values))), # noqa: E501
", ".join(map(str, allowed_values)))
)
self._operations_by_resource = operations_by_resource
@property
def user_owner_id(self):
"""Gets the user_owner_id of this AllowedPermissionsInfo. # noqa: E501
:return: The user_owner_id of this AllowedPermissionsInfo. # noqa: E501
:rtype: EntityId
"""
return self._user_owner_id
@user_owner_id.setter
def user_owner_id(self, user_owner_id):
"""Sets the user_owner_id of this AllowedPermissionsInfo.
:param user_owner_id: The user_owner_id of this AllowedPermissionsInfo. # noqa: E501
:type: EntityId
"""
self._user_owner_id = user_owner_id
@property
def user_permissions(self):
"""Gets the user_permissions of this AllowedPermissionsInfo. # noqa: E501
:return: The user_permissions of this AllowedPermissionsInfo. # noqa: E501
:rtype: MergedUserPermissions
"""
return self._user_permissions
@user_permissions.setter
def user_permissions(self, user_permissions):
"""Sets the user_permissions of this AllowedPermissionsInfo.
:param user_permissions: The user_permissions of this AllowedPermissionsInfo. # noqa: E501
:type: MergedUserPermissions
"""
self._user_permissions = user_permissions
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(AllowedPermissionsInfo, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AllowedPermissionsInfo):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
```
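The setters of `AllowedPermissionsInfo` enforce the permission and resource enumerations, so invalid values are caught on assignment rather than on the wire. A brief sketch of that behaviour, with the import path assumed from the file layout above:
```python
from tb_rest_client.models.models_pe import AllowedPermissionsInfo  # assumed import path

info = AllowedPermissionsInfo(allowed_resources=["DEVICE", "ASSET"],
                              allowed_for_group_role_operations=["READ", "WRITE"])
print(info.to_dict())

# Values outside the documented enumeration are rejected by the setter.
try:
    info.allowed_resources = ["DEVICE", "SPACESHIP"]
except ValueError as err:
    print(err)
```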
#### File: models/models_pe/o_auth2_basic_mapper_config.py
```python
import pprint
import re # noqa: F401
import six
class OAuth2BasicMapperConfig(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'always_full_screen': 'bool',
'customer_name_pattern': 'str',
'default_dashboard_name': 'str',
'email_attribute_key': 'str',
'first_name_attribute_key': 'str',
'last_name_attribute_key': 'str',
'parent_customer_name_pattern': 'str',
'tenant_name_pattern': 'str',
'tenant_name_strategy': 'str',
'user_groups_name_pattern': 'list[str]'
}
attribute_map = {
'always_full_screen': 'alwaysFullScreen',
'customer_name_pattern': 'customerNamePattern',
'default_dashboard_name': 'defaultDashboardName',
'email_attribute_key': 'emailAttributeKey',
'first_name_attribute_key': 'firstNameAttributeKey',
'last_name_attribute_key': 'lastNameAttributeKey',
'parent_customer_name_pattern': 'parentCustomerNamePattern',
'tenant_name_pattern': 'tenantNamePattern',
'tenant_name_strategy': 'tenantNameStrategy',
'user_groups_name_pattern': 'userGroupsNamePattern'
}
def __init__(self, always_full_screen=None, customer_name_pattern=None, default_dashboard_name=None, email_attribute_key=None, first_name_attribute_key=None, last_name_attribute_key=None, parent_customer_name_pattern=None, tenant_name_pattern=None, tenant_name_strategy=None, user_groups_name_pattern=None): # noqa: E501
"""OAuth2BasicMapperConfig - a model defined in Swagger""" # noqa: E501
self._always_full_screen = None
self._customer_name_pattern = None
self._default_dashboard_name = None
self._email_attribute_key = None
self._first_name_attribute_key = None
self._last_name_attribute_key = None
self._parent_customer_name_pattern = None
self._tenant_name_pattern = None
self._tenant_name_strategy = None
self._user_groups_name_pattern = None
self.discriminator = None
if always_full_screen is not None:
self.always_full_screen = always_full_screen
if customer_name_pattern is not None:
self.customer_name_pattern = customer_name_pattern
if default_dashboard_name is not None:
self.default_dashboard_name = default_dashboard_name
if email_attribute_key is not None:
self.email_attribute_key = email_attribute_key
if first_name_attribute_key is not None:
self.first_name_attribute_key = first_name_attribute_key
if last_name_attribute_key is not None:
self.last_name_attribute_key = last_name_attribute_key
if parent_customer_name_pattern is not None:
self.parent_customer_name_pattern = parent_customer_name_pattern
if tenant_name_pattern is not None:
self.tenant_name_pattern = tenant_name_pattern
if tenant_name_strategy is not None:
self.tenant_name_strategy = tenant_name_strategy
if user_groups_name_pattern is not None:
self.user_groups_name_pattern = user_groups_name_pattern
@property
def always_full_screen(self):
"""Gets the always_full_screen of this OAuth2BasicMapperConfig. # noqa: E501
:return: The always_full_screen of this OAuth2BasicMapperConfig. # noqa: E501
:rtype: bool
"""
return self._always_full_screen
@always_full_screen.setter
def always_full_screen(self, always_full_screen):
"""Sets the always_full_screen of this OAuth2BasicMapperConfig.
:param always_full_screen: The always_full_screen of this OAuth2BasicMapperConfig. # noqa: E501
:type: bool
"""
self._always_full_screen = always_full_screen
@property
def customer_name_pattern(self):
"""Gets the customer_name_pattern of this OAuth2BasicMapperConfig. # noqa: E501
:return: The customer_name_pattern of this OAuth2BasicMapperConfig. # noqa: E501
:rtype: str
"""
return self._customer_name_pattern
@customer_name_pattern.setter
def customer_name_pattern(self, customer_name_pattern):
"""Sets the customer_name_pattern of this OAuth2BasicMapperConfig.
:param customer_name_pattern: The customer_name_pattern of this OAuth2BasicMapperConfig. # noqa: E501
:type: str
"""
self._customer_name_pattern = customer_name_pattern
@property
def default_dashboard_name(self):
"""Gets the default_dashboard_name of this OAuth2BasicMapperConfig. # noqa: E501
:return: The default_dashboard_name of this OAuth2BasicMapperConfig. # noqa: E501
:rtype: str
"""
return self._default_dashboard_name
@default_dashboard_name.setter
def default_dashboard_name(self, default_dashboard_name):
"""Sets the default_dashboard_name of this OAuth2BasicMapperConfig.
:param default_dashboard_name: The default_dashboard_name of this OAuth2BasicMapperConfig. # noqa: E501
:type: str
"""
self._default_dashboard_name = default_dashboard_name
@property
def email_attribute_key(self):
"""Gets the email_attribute_key of this OAuth2BasicMapperConfig. # noqa: E501
:return: The email_attribute_key of this OAuth2BasicMapperConfig. # noqa: E501
:rtype: str
"""
return self._email_attribute_key
@email_attribute_key.setter
def email_attribute_key(self, email_attribute_key):
"""Sets the email_attribute_key of this OAuth2BasicMapperConfig.
:param email_attribute_key: The email_attribute_key of this OAuth2BasicMapperConfig. # noqa: E501
:type: str
"""
self._email_attribute_key = email_attribute_key
@property
def first_name_attribute_key(self):
"""Gets the first_name_attribute_key of this OAuth2BasicMapperConfig. # noqa: E501
:return: The first_name_attribute_key of this OAuth2BasicMapperConfig. # noqa: E501
:rtype: str
"""
return self._first_name_attribute_key
@first_name_attribute_key.setter
def first_name_attribute_key(self, first_name_attribute_key):
"""Sets the first_name_attribute_key of this OAuth2BasicMapperConfig.
:param first_name_attribute_key: The first_name_attribute_key of this OAuth2BasicMapperConfig. # noqa: E501
:type: str
"""
self._first_name_attribute_key = first_name_attribute_key
@property
def last_name_attribute_key(self):
"""Gets the last_name_attribute_key of this OAuth2BasicMapperConfig. # noqa: E501
:return: The last_name_attribute_key of this OAuth2BasicMapperConfig. # noqa: E501
:rtype: str
"""
return self._last_name_attribute_key
@last_name_attribute_key.setter
def last_name_attribute_key(self, last_name_attribute_key):
"""Sets the last_name_attribute_key of this OAuth2BasicMapperConfig.
:param last_name_attribute_key: The last_name_attribute_key of this OAuth2BasicMapperConfig. # noqa: E501
:type: str
"""
self._last_name_attribute_key = last_name_attribute_key
@property
def parent_customer_name_pattern(self):
"""Gets the parent_customer_name_pattern of this OAuth2BasicMapperConfig. # noqa: E501
:return: The parent_customer_name_pattern of this OAuth2BasicMapperConfig. # noqa: E501
:rtype: str
"""
return self._parent_customer_name_pattern
@parent_customer_name_pattern.setter
def parent_customer_name_pattern(self, parent_customer_name_pattern):
"""Sets the parent_customer_name_pattern of this OAuth2BasicMapperConfig.
:param parent_customer_name_pattern: The parent_customer_name_pattern of this OAuth2BasicMapperConfig. # noqa: E501
:type: str
"""
self._parent_customer_name_pattern = parent_customer_name_pattern
@property
def tenant_name_pattern(self):
"""Gets the tenant_name_pattern of this OAuth2BasicMapperConfig. # noqa: E501
:return: The tenant_name_pattern of this OAuth2BasicMapperConfig. # noqa: E501
:rtype: str
"""
return self._tenant_name_pattern
@tenant_name_pattern.setter
def tenant_name_pattern(self, tenant_name_pattern):
"""Sets the tenant_name_pattern of this OAuth2BasicMapperConfig.
:param tenant_name_pattern: The tenant_name_pattern of this OAuth2BasicMapperConfig. # noqa: E501
:type: str
"""
self._tenant_name_pattern = tenant_name_pattern
@property
def tenant_name_strategy(self):
"""Gets the tenant_name_strategy of this OAuth2BasicMapperConfig. # noqa: E501
:return: The tenant_name_strategy of this OAuth2BasicMapperConfig. # noqa: E501
:rtype: str
"""
return self._tenant_name_strategy
@tenant_name_strategy.setter
def tenant_name_strategy(self, tenant_name_strategy):
"""Sets the tenant_name_strategy of this OAuth2BasicMapperConfig.
:param tenant_name_strategy: The tenant_name_strategy of this OAuth2BasicMapperConfig. # noqa: E501
:type: str
"""
allowed_values = ["DOMAIN", "EMAIL", "CUSTOM"] # noqa: E501
if tenant_name_strategy not in allowed_values:
raise ValueError(
"Invalid value for `tenant_name_strategy` ({0}), must be one of {1}" # noqa: E501
.format(tenant_name_strategy, allowed_values)
)
self._tenant_name_strategy = tenant_name_strategy
@property
def user_groups_name_pattern(self):
"""Gets the user_groups_name_pattern of this OAuth2BasicMapperConfig. # noqa: E501
:return: The user_groups_name_pattern of this OAuth2BasicMapperConfig. # noqa: E501
:rtype: list[str]
"""
return self._user_groups_name_pattern
@user_groups_name_pattern.setter
def user_groups_name_pattern(self, user_groups_name_pattern):
"""Sets the user_groups_name_pattern of this OAuth2BasicMapperConfig.
:param user_groups_name_pattern: The user_groups_name_pattern of this OAuth2BasicMapperConfig. # noqa: E501
:type: list[str]
"""
self._user_groups_name_pattern = user_groups_name_pattern
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(OAuth2BasicMapperConfig, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, OAuth2BasicMapperConfig):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
```
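A minimal, hedged sketch of the validation behaviour encoded by the `tenant_name_strategy` setter above. The import path is an assumption about how the generated client package is laid out and may differ in the actual repository.

```python
# Assumed import location for the generated model; adjust to the real package layout.
from tb_rest_client.models import OAuth2BasicMapperConfig

config = OAuth2BasicMapperConfig()
config.tenant_name_strategy = "DOMAIN"          # accepted: DOMAIN, EMAIL, CUSTOM
try:
    config.tenant_name_strategy = "SOMETHING_ELSE"
except ValueError as err:
    print(err)                                  # rejected by the setter above
print(config.to_dict())
```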
|
{
"source": "jernsthausen/datesplitter",
"score": 3
}
|
#### File: site-packages/parserator/data_prep_utils.py
```python
from lxml import etree
import os
import csv
# appends a labeled list to an existing xml file
# calls: appendListToXML, stripFormatting
def appendListToXMLfile(labeled_list, module, filepath):
# format for labeled_list: [ [ (token, label), (token, label), ...],
# [ (token, label), (token, label), ...],
# [ (token, label), (token, label), ...],
# ... ]
if os.path.isfile(filepath):
with open( filepath, 'r+' ) as f:
tree = etree.parse(filepath)
collection_XML = tree.getroot()
collection_XML = stripFormatting(collection_XML)
else:
collection_tag = module.GROUP_LABEL
collection_XML = etree.Element(collection_tag)
parent_tag = module.PARENT_LABEL
collection_XML = appendListToXML(labeled_list, collection_XML, parent_tag)
with open(filepath, 'w') as f :
f.write(etree.tostring(collection_XML, pretty_print = True))
# given a list of labeled sequences to an xml list,
# appends corresponding xml to existing xml
# calls: sequence2XML
# called by: appendListToXMLfile
def appendListToXML(list_to_append, collection_XML, parent_tag) :
# format for list_to_append: [ [ (token, label), (token, label), ...],
# [ (token, label), (token, label), ...],
# [ (token, label), (token, label), ...],
# ... ]
for labeled_sequence in list_to_append:
sequence_xml = sequence2XML(labeled_sequence, parent_tag)
collection_XML.append(sequence_xml)
return collection_XML
# given a labeled sequence, generates xml for that sequence
# called by: appendListToXML
def sequence2XML(labeled_sequence, parent_tag) :
# format for labeled_sequence: [(token, label), (token, label), ...]
sequence_xml = etree.Element(parent_tag)
for token, label in labeled_sequence:
component_xml = etree.Element(label)
component_xml.text = token
component_xml.tail = ' '
sequence_xml.append(component_xml)
sequence_xml[-1].tail = ''
return sequence_xml
# clears formatting for an xml collection
def stripFormatting(collection) :
collection.text = None
for element in collection :
element.text = None
element.tail = None
return collection
# writes a list of strings to a file
def list2file(string_list, filepath):
with open(filepath, 'wb') as csvfile:
writer = csv.writer(csvfile, doublequote=True, quoting=csv.QUOTE_MINIMAL)
for string in string_list:
writer.writerow([string.encode('utf-8')])
```
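A small usage sketch for the helpers above. The parser module and the labeled tokens are placeholders: any parserator-style module that defines `GROUP_LABEL` and `PARENT_LABEL`, as the comments describe, would be used the same way.

```python
from parserator.data_prep_utils import appendListToXMLfile, list2file
import my_parser  # placeholder: a module exposing GROUP_LABEL and PARENT_LABEL

labeled = [
    [("123", "AddressNumber"), ("Main", "StreetName"), ("St", "StreetNamePostType")],
]
appendListToXMLfile(labeled, my_parser, "training/labeled.xml")
list2file([u"123 Main St"], "training/strings.csv")
```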
#### File: site-packages/parserator/main.py
```python
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
from . import manual_labeling
from . import training
import os
import shutil
import fileinput
from .parser_template import init_template, setup_template, test_tokenize_template
def dispatch():
parser = ArgumentParser(description="")
parser_subparsers = parser.add_subparsers()
sub_label = parser_subparsers.add_parser('label')
sub_train = parser_subparsers.add_parser('train')
sub_init = parser_subparsers.add_parser('init')
sub_label.add_argument(dest='infile', help='input csv filepath for the label task')
sub_label.add_argument(dest='outfile', help='output xml filepath for the label task')
sub_label.add_argument(dest='modulename', help='parser module name')
sub_label.set_defaults(func=label)
sub_train.add_argument(dest='traindata', help='comma separated xml filepaths, or "path/to/traindata/*.xml"')
sub_train.add_argument(dest='modulename', help='parser module name')
sub_train.set_defaults(func=train)
sub_init.add_argument(dest='modulename', help='module name for a new parser')
sub_init.set_defaults(func=init)
args = parser.parse_args()
args.func(args)
def label(args) :
if args.infile and args.outfile:
module = __import__(args.modulename)
infile_path = args.infile
outfile_path = args.outfile
manual_labeling.label(module, infile_path, outfile_path)
else:
print('Please specify an input csv file [--infile FILE] and an output xml file [--outfile FILE]')
def train(args) :
if args.traindata:
if args.traindata.endswith('*.xml'):
train_data_dir = args.traindata[:-5]
if not train_data_dir:
train_data_dir = '.'
train_file_list = []
for filename in os.listdir(train_data_dir):
if filename.endswith('.xml'):
train_file_list.append(args.traindata[:-5]+filename)
else:
train_file_list = args.traindata.split(',')
module = __import__(args.modulename)
training.train(module, train_file_list)
else:
print('Please specify one or more xml training files (comma separated) [--trainfile FILE]')
def init(args) :
name = args.modulename
data = "raw"
training = "training"
tests = 'tests'
dirs_to_mk = [name, data, training, tests]
print('\nInitializing directories for %s' %name)
for directory in dirs_to_mk:
if not os.path.exists(directory):
os.mkdir(directory)
print('* %s' %directory)
print('\nGenerating __init__.py')
init_path = name + '/__init__.py'
if os.path.exists(init_path):
print(' warning: %s already exists' %init_path)
else:
with open(init_path, "w") as f:
f.write(init_template())
print('* %s' %init_path)
print('\nGenerating setup.py')
if os.path.exists('setup.py'):
print(' warning: setup.py already exists')
else:
with open('setup.py', 'w') as f:
f.write(setup_template(name))
print('* setup.py')
print('\nGenerating test file')
token_test_path = tests+'/test_tokenizing.py'
if os.path.exists(token_test_path):
print(' warning: %s already exists' %token_test_path)
else:
with open(token_test_path, 'w') as f:
f.write(test_tokenize_template(name))
print('* %s' %token_test_path)
```
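The dispatcher above wires up three subcommands (`init`, `label`, `train`). A hedged sketch of driving it programmatically; the module and file names are invented, and on a shell this corresponds to `parserator init my_parser` and `parserator train "training/*.xml" my_parser`.

```python
import sys
from parserator import main

sys.argv = ["parserator", "init", "my_parser"]      # scaffold directories and setup.py
main.dispatch()

sys.argv = ["parserator", "train", "training/*.xml", "my_parser"]
main.dispatch()                                     # trains a CRF model once labeled XML exists
```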
#### File: site-packages/parserator/training.py
```python
from __future__ import print_function
from __future__ import absolute_import
from builtins import zip
import pycrfsuite
import random
import os
from lxml import etree
from imp import reload
from . import data_prep_utils
import re
import time
def trainModel(training_data, module,
params_to_set={'c1':0.1, 'c2':0.01, 'feature.minfreq':0}):
X = []
Y = []
for raw_string, components in training_data:
tokens, labels = list(zip(*components))
X.append(module.tokens2features(tokens))
Y.append(labels)
# train model
trainer = pycrfsuite.Trainer(verbose=False, params=params_to_set)
for xseq, yseq in zip(X, Y):
trainer.append(xseq, yseq)
trainer.train(module.__name__+'/'+module.MODEL_FILE)
# given a list of xml training filepaths & a parser module,
# reads the xml & returns training data (for trainModel)
def readTrainingData( xml_infile_list, collection_tag ):
full_xml = etree.Element(collection_tag)
component_string_list = []
# loop through xml training files
for xml_infile in xml_infile_list:
train_data_filepath = xml_infile
if os.path.isfile(train_data_filepath):
with open( train_data_filepath, 'r+' ) as f:
tree = etree.parse(f)
file_xml = tree.getroot()
file_xml = data_prep_utils.stripFormatting(file_xml)
for component_etree in file_xml:
# etree components to string representations
component_string_list.append(etree.tostring(component_etree).decode('utf-8'))
else:
print('WARNING: %s does not exist' % xml_infile)
# get rid of duplicates in string representations
component_string_list = list(set(component_string_list))
# loop through unique string representations
for component_string in component_string_list:
# convert string representation back to xml
sequence_xml = etree.fromstring(component_string)
raw_text = etree.tostring(sequence_xml, method='text', encoding='utf-8')
sequence_components = []
for component in list(sequence_xml):
sequence_components.append([component.text, component.tag])
yield raw_text, sequence_components
def renameModelFile(old_model):
if os.path.exists(old_model):
t = time.gmtime(os.path.getctime(old_model))
time_str = '_'+str(t.tm_year)+'_'+str(t.tm_mon)+'_'+str(t.tm_mday)+'_'+str(t.tm_hour)+str(t.tm_min)+str(t.tm_sec)
renamed = re.sub('.crfsuite', time_str+'.crfsuite', old_model)
print("\nrenaming old model: %s -> %s" %(old_model, renamed))
os.rename(old_model, renamed)
def train(module, train_file_list) :
training_data = list(readTrainingData(train_file_list, module.GROUP_LABEL))
if not training_data:
print('ERROR: No training data found. Perhaps double check your training data filepaths?')
return
model_path = module.__name__+'/'+module.MODEL_FILE
renameModelFile(model_path)
print('\ntraining model on {num} training examples from {file_list}'.format(num=len(training_data), file_list=train_file_list))
trainModel(training_data, module)
print('\ndone training! model file created: {path}'.format(path=model_path))
```
|
{
"source": "jernsting/nxt_gem",
"score": 3
}
|
#### File: gem/embedding/node2vec.py
```python
import os
import numpy as np
from subprocess import call
from gem.embedding.static_graph_embedding import StaticGraphEmbedding
from gem.utils import graph_util
class node2vec(StaticGraphEmbedding):
hyper_params = {
'method_name': 'node2vec_rw'
}
def __init__(self, *args, **kwargs):
""" Initialize the node2vec class
Args:
d: dimension of the embedding
max_iter: max iterations
walk_len: length of random walk
num_walks: number of random walks
con_size: context size
ret_p: return weight
inout_p: inout weight
"""
super(node2vec, self).__init__(*args, **kwargs)
def learn_embedding(self, graph=None,
is_weighted=False, no_python=False):
current_dir = os.path.dirname(os.path.abspath(__file__))
executable = os.path.abspath(os.path.join(current_dir, '../c_exe/node2vec'))
args = [executable]
if not graph:
raise ValueError('graph needed')
graph_util.saveGraphToEdgeListTxtn2v(graph, 'tempGraph.graph')
args.append("-i:tempGraph.graph")
args.append("-o:tempGraph.emb")
args.append("-d:%d" % self._d)
args.append("-l:%d" % self._walk_len)
args.append("-r:%d" % self._num_walks)
args.append("-k:%d" % self._con_size)
args.append("-e:%d" % self._max_iter)
args.append("-p:%f" % self._ret_p)
args.append("-q:%f" % self._inout_p)
args.append("-v")
args.append("-dr")
args.append("-w")
try:
call(args)
except Exception as e: # pragma: no cover
print(str(e))
raise FileNotFoundError('./node2vec not found. Please compile snap, place node2vec in the system path '
'and grant executable permission')
self._X = graph_util.loadEmbedding('tempGraph.emb')
return self._X
def get_edge_weight(self, i, j):
return np.dot(self._X[i, :], self._X[j, :])
```
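A minimal sketch of using the wrapper above. It assumes the compiled SNAP `node2vec` binary is available as `learn_embedding` requires, and that the docstring's hyper-parameters (`d`, `max_iter`, `walk_len`, `num_walks`, `con_size`, `ret_p`, `inout_p`) are accepted as keyword arguments by the base-class constructor; the graph is a random placeholder.

```python
import networkx as nx
from gem.embedding.node2vec import node2vec

graph = nx.fast_gnp_random_graph(n=100, p=0.05, directed=True)
embedder = node2vec(d=2, max_iter=1, walk_len=80, num_walks=10,
                    con_size=10, ret_p=1, inout_p=1)
X = embedder.learn_embedding(graph=graph)     # writes tempGraph.graph / tempGraph.emb
print(X.shape)                                # expected (100, 2) if the binary ran
print(embedder.get_edge_weight(0, 1))         # dot product of the two embeddings
```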
#### File: gem/evaluation/evaluate_graph_reconstruction.py
```python
import pickle
from gem.evaluation import metrics
from gem.utils import evaluation_util, graph_util
import networkx as nx
import numpy as np
def evaluateStaticGraphReconstruction(digraph, graph_embedding,
X_stat, node_l=None, file_suffix=None,
sample_ratio_e=None, is_undirected=True,
is_weighted=False):
node_num = len(digraph.nodes)
# evaluation
if sample_ratio_e:
eval_edge_pairs = evaluation_util.get_random_edge_pairs(
node_num,
sample_ratio_e,
is_undirected
)
else:
eval_edge_pairs = None
if file_suffix is None:
estimated_adj = graph_embedding.get_reconstructed_adj(X_stat, node_l)
else:
estimated_adj = graph_embedding.get_reconstructed_adj(
X_stat,
file_suffix,
node_l
)
predicted_edge_list = evaluation_util.get_edge_list_from_adj_mtrx(
estimated_adj,
is_undirected=is_undirected,
edge_pairs=eval_edge_pairs
)
MAP = metrics.computeMAP(predicted_edge_list, digraph, is_undirected=is_undirected)
prec_curv, _ = metrics.computePrecisionCurve(predicted_edge_list, digraph)
# If weighted, compute the error in reconstructed weights of observed edges
if is_weighted:
digraph_adj = nx.to_numpy_matrix(digraph)
estimated_adj[digraph_adj == 0] = 0
err = np.linalg.norm(digraph_adj - estimated_adj)
err_baseline = np.linalg.norm(digraph_adj)
else:
err = None
err_baseline = None
return MAP, prec_curv, err, err_baseline
```
|
{
"source": "jernsting/useful_layers",
"score": 3
}
|
#### File: useful_layers/layers/channel_attention.py
```python
import torch
from torch.nn import functional as F
from useful_layers.utils import reduction_network
from useful_layers.layers.ABCLayer import Layer
__all__ = ['ChannelAttention2D', 'ChannelAttention3D']
class _ChannelAttention(Layer):
def __init__(self):
super(_ChannelAttention, self).__init__()
def forward(self, x: torch.Tensor) -> torch.Tensor:
size = x.size()
if isinstance(self, ChannelAttention2D):
view = (size[0], size[1], 1, 1)
elif isinstance(self, ChannelAttention3D):
view = (size[0], size[1], 1, 1, 1)
else:
raise NotImplementedError(f'Expected to be ChannelAttention2D or -3D, got {self}')
avg_comp = torch.mean(x.view(size[0], size[1], -1), dim=-1).view(*view)
max_comp = torch.max(x.view(size[0], size[1], -1), dim=-1).values.view(*view)
avg_comp = self.conv2(F.relu(self.conv1(avg_comp)))
max_comp = self.conv2(F.relu(self.conv1(max_comp)))
return F.sigmoid(avg_comp + max_comp)
class ChannelAttention2D(_ChannelAttention):
"""ChannelAttention2D
Channel attention layer as presented in
https://arxiv.org/pdf/1807.06521v2.pdf.
"""
def __init__(self,
in_channels: int,
reduction: int = 2):
"""Create ChannelAttention2D Layer
Args:
in_channels (int): Number of input channels
reduction (int, optional): Degree of reduction. Defaults to 2.
"""
super(ChannelAttention2D, self).__init__()
self.conv1, self.conv2 = reduction_network(in_channels, reduction, "2d")
class ChannelAttention3D(_ChannelAttention):
"""ChannelAttention3D
Channel attention layer as presented in
https://arxiv.org/pdf/1807.06521v2.pdf.
"""
def __init__(self,
in_channels: int,
reduction: int = 2):
"""Create ChannelAttention3D Layer
Args:
in_channels (int): Number of input channels
reduction (int, optional): Degree of reduction. Defaults to 2.
"""
super(ChannelAttention3D, self).__init__()
self.conv1, self.conv2 = reduction_network(in_channels, reduction, "3d")
```
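A short usage sketch for the layers above, assuming the usual CBAM-style pattern of multiplying the input by the returned attention weights, and assuming `reduction_network` (not shown here) restores the original channel count so the weights broadcast over the feature map.

```python
import torch
from useful_layers.layers.channel_attention import ChannelAttention2D

x = torch.randn(8, 32, 64, 64)                  # (batch, channels, H, W)
attention = ChannelAttention2D(in_channels=32, reduction=2)
weights = attention(x)                          # expected shape (8, 32, 1, 1), values in (0, 1)
refined = x * weights                           # re-weight channels, CBAM-style
print(refined.shape)
```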
#### File: useful_layers/layers/spatial_attention.py
```python
from abc import ABC
import torch
import torch.nn as nn
import torch.nn.functional as F
from useful_layers.layers.ABCLayer import Layer
__all__ = ['SpatialAttention2D', 'SpatialAttention3D']
class _SpatialAttention(Layer, ABC):
def __init__(self):
super(_SpatialAttention, self).__init__()
self.spacial_conv = self.conv(in_channels=2,
kernel_size=self.kernel_size,
out_channels=1,
stride=1,
dilation=1,
groups=1,
bias=False,
padding=(self.kernel_size - 1) // 2)
if self.batch_norm:
self.batch_norm = self.batch_norm(1)
def forward(self, x: torch.Tensor) -> torch.Tensor:
        avg_comp = torch.mean(x, 1).unsqueeze(1)
        max_comp = torch.max(x, 1).values.unsqueeze(1)
conv_input = torch.cat((avg_comp, max_comp), dim=1)
attention_map = self.spacial_conv(conv_input)
if self.batch_norm:
attention_map = self.batch_norm(attention_map)
attention_map = F.sigmoid(attention_map)
return attention_map
class SpatialAttention2D(_SpatialAttention):
"""Simple spatial attention layer
Implementation based on: https://arxiv.org/abs/1807.06521v2
"""
def __init__(self,
in_channels: int,
kernel_size: int = 7,
batch_norm: bool = True):
"""Create new SpatialAttention Layer
Args:
in_channels: Number of input channels
kernel_size: Kernel size to use (integer or tuple of int)
batch_norm: If true batch normalization is applied. Defaults to True.
"""
self.in_channels = in_channels
self.kernel_size = kernel_size
self.batch_norm = None
if batch_norm:
self.batch_norm = nn.BatchNorm2d
self.conv = nn.Conv2d
super(SpatialAttention2D, self).__init__()
class SpatialAttention3D(_SpatialAttention):
"""Simple spatial attention layer
Implementation based on: https://arxiv.org/abs/1807.06521v2
"""
def __init__(self,
in_channels: int,
kernel_size: int = 7,
batch_norm: bool = True):
"""Create a SpatialAttention3D layer
Args:
in_channels: Number of input channels
kernel_size: Kernel size to use (integer or tuple of int)
batch_norm: If true batch normalization is applied. Defaults to True.
"""
self.in_channels = in_channels
self.kernel_size = kernel_size
self.batch_norm = None
if batch_norm:
self.batch_norm = nn.BatchNorm3d
self.conv = nn.Conv3d
super(SpatialAttention3D, self).__init__()
```
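The same kind of sketch for the spatial variant: the layer returns one weight per spatial position, which is broadcast over the channels of the input. Shapes are illustrative.

```python
import torch
from useful_layers.layers.spatial_attention import SpatialAttention2D

x = torch.randn(8, 32, 64, 64)
attention = SpatialAttention2D(in_channels=32, kernel_size=7, batch_norm=True)
mask = attention(x)                 # shape (8, 1, 64, 64)
refined = x * mask                  # emphasise or suppress locations, CBAM-style
print(refined.shape)
```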
|
{
"source": "jernst/mf2py",
"score": 3
}
|
#### File: mf2py/mf2py/http_server.py
```python
from __future__ import print_function
import os
from mf2py import Parser
from flask import Flask, Response, request
app = Flask(__name__)
@app.route("/", methods=["GET"])
def index():
resp = """<!DOCTYPE HTML>
<html><head><title>mf2py</title></head>
<body><form action="/parse" method="get">
<h1>mf2py test</h1>
URL: <input type="text" name="url" /> <input type="submit" /></body></html>
"""
return Response(resp, status=200, mimetype="text/html")
@app.route("/parse", methods=["GET", "POST"])
def parse():
if request.method == 'GET':
u = request.args['url']
else:
u = request.form['url']
print(u)
p = Parser(url=unicode(u))
return Response(p.to_json(pretty_print=True), status=200,
mimetype='application/json')
if __name__ == "__main__":
app.run(host='0.0.0.0', port=int(os.environ.get('PORT', 33507)))
```
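A hedged client-side example for the `/parse` route defined above, assuming the server is running locally on the port chosen in `__main__` (33507 unless `PORT` is set); the URL being parsed is arbitrary.

```python
import requests

resp = requests.get("http://localhost:33507/parse",
                    params={"url": "http://microformats.org/"})
print(resp.json())   # the microformats2 structure produced by Parser.to_json()
```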
#### File: mf2py/test/test_parser.py
```python
from __future__ import unicode_literals, print_function
import os.path
import re
import sys
import mock
from nose.tools import assert_equal, assert_true, assert_false
from mf2py import Parser
from unittest import TestCase
TestCase.maxDiff = None
if sys.version < '3':
text_type = unicode
binary_type = str
else:
text_type = str
binary_type = bytes
def parse_fixture(path, url=None):
with open(os.path.join("test/examples/", path)) as f:
p = Parser(doc=f, url=url, html_parser='html5lib')
return p.to_dict()
def test_empty():
p = Parser()
assert_true(type(p) is not None)
assert_true(type(p.to_dict()) is dict)
def test_open_file():
p = Parser(doc=open("test/examples/empty.html"))
assert_true(p.__doc__ is not None)
assert_true(type(p) is not None)
assert_true(type(p.to_dict()) is dict)
@mock.patch('requests.get')
def test_user_agent(getter):
ua_expect = 'mf2py - microformats2 parser for python'
assert_true(Parser.useragent.startswith(ua_expect))
resp = mock.MagicMock()
resp.content = b''
resp.text = ''
resp.headers = {}
getter.return_value = resp
Parser(url='http://example.com')
getter.assert_called_with('http://example.com', headers={
'User-Agent': Parser.useragent
})
Parser.useragent = 'something else'
assert_equal(Parser.useragent, 'something else')
# set back to default. damn stateful classes
Parser.useragent = 'mf2py - microformats2 parser for python'
def test_base():
p = Parser(doc=open("test/examples/base.html"))
assert_equal(p.__url__, "http://tantek.com/")
def test_simple_parse():
result = parse_fixture("simple_person_reference.html")
assert_equal(result["items"][0]["properties"],
{'name': ['<NAME>']})
def test_simple_person_reference_implied():
p = Parser(doc=open("test/examples/simple_person_reference_implied.html"))
result = p.to_dict()
assert_equal(result["items"][0]["properties"],
{'name': ['<NAME>']})
def test_simple_person_reference_same_element():
result = parse_fixture("simple_person_reference_same_element.html")
assert_equal(result["items"][0]["properties"],
{'name': ['<NAME>']})
def test_person_with_url():
p = Parser(doc=open("test/examples/person_with_url.html"))
result = p.to_dict()
assert_equal(result["items"][0]["properties"]["name"],
['<NAME>'])
assert_equal(result["items"][0]["properties"]["url"],
['http://tommorris.org/'])
def test_vcp():
result = parse_fixture("value_class_person.html")
assert_equal(result["items"][0]["properties"]["tel"], ['+44 1234 567890'])
def test_multiple_root_classnames():
result = parse_fixture("nested_multiple_classnames.html")
# order does not matter
assert_equal(len(result["items"]), 1)
assert_equal(set(result["items"][0]["type"]),
set(["h-entry", "h-as-note"]))
def test_property_nested_microformat():
result = parse_fixture("nested_multiple_classnames.html")
assert_equal(len(result["items"]), 1)
assert "author" in result["items"][0]["properties"]
assert_equal(
result["items"][0]["properties"]["author"][0]["properties"]["name"][0],
"<NAME>")
assert_equal(
result["items"][0]["properties"]["reviewer"][0]
["properties"]["name"][0],
"<NAME>")
assert_equal(
result["items"][0]["properties"]["author"][0]
["properties"]["adr"][0]["properties"]["city"][0],
"London")
def test_plain_child_microformat():
result = parse_fixture("nested_multiple_classnames.html")
assert_equal(len(result["items"]), 1)
assert_true("children" in result["items"][0])
assert_equal(len(result["items"][0]["children"]), 1)
assert_equal(
result["items"][0]["children"][0]["properties"]["name"][0],
"Some Citation")
def test_implied_name():
result = parse_fixture("implied_properties.html")
for i in range(6):
assert_equal(result["items"][i]["properties"]["name"][0], "<NAME>")
def test_implied_url():
result = parse_fixture("implied_properties.html", url="http://foo.com/")
assert_equal(
result["items"][1]["properties"]["url"][0], "http://tommorris.org/")
# img should not have a "url" property
assert_true("url" not in result["items"][4]["properties"])
# href="" is relative to the base url
assert_equal(result["items"][5]["properties"]["url"][0], "http://foo.com/")
def test_implied_nested_photo():
result = parse_fixture("implied_properties.html", url="http://bar.org")
assert_equal(result["items"][2]["properties"]["photo"][0],
"http://tommorris.org/photo.png")
# src="" is relative to the base url
assert_equal(result["items"][5]["properties"]["photo"][0],
"http://bar.org")
def test_implied_nested_photo_alt_name():
result = parse_fixture("implied_properties.html")
assert_equal(result["items"][3]["properties"]["name"][0], "<NAME>")
def test_implied_image():
result = parse_fixture("implied_properties.html")
assert_equal(result["items"][4]["properties"]["photo"][0],
"http://tommorris.org/photo.png")
assert_equal(result["items"][4]["properties"]["name"][0], "<NAME>")
def test_datetime_parsing():
result = parse_fixture("datetimes.html")
assert_equal(result["items"][0]["properties"]["start"][0],
"2014-01-01T12:00:00+00:00")
assert_equal(result["items"][0]["properties"]["end"][0],
"3014-01-01T18:00:00+00:00")
assert_equal(result["items"][0]["properties"]["duration"][0],
"P1000Y")
assert_equal(result["items"][0]["properties"]["updated"][0],
"2011-08-26T00:01:21+00:00")
assert_equal(result["items"][0]["properties"]["updated"][1],
"2011-08-26T00:01:21+00:00")
def test_datetime_vcp_parsing():
result = parse_fixture("datetimes.html")
assert_equal(result["items"][1]["properties"]["published"][0],
"3014-01-01T01:21:00Z")
assert_equal(result["items"][2]["properties"]["updated"][0],
"2014-03-11 09:55:00")
assert_equal(result["items"][3]["properties"]["published"][0],
"2014-01-30T15:28:00")
assert_equal(result["items"][4]["properties"]["published"][0],
"9999-01-14T11:52:00+08:00")
assert_equal(result["items"][5]["properties"]["published"][0],
"2014-06-01T12:30:00-06:00")
def test_dt_end_implied_date():
"""Test that events with dt-start and dt-end use the implied date rule
http://microformats.org/wiki/value-class-pattern#microformats2_parsers
for times without dates"""
result = parse_fixture("datetimes.html")
event_wo_tz = result["items"][6]
assert_equal(event_wo_tz["properties"]["start"][0],
"2014-05-21T18:30:00")
assert_equal(event_wo_tz["properties"]["end"][0],
"2014-05-21T19:30:00")
event_w_tz = result["items"][7]
assert_equal(event_w_tz["properties"]["start"][0],
"2014-06-01T12:30:00-06:00")
assert_equal(event_w_tz["properties"]["end"][0],
"2014-06-01T19:30:00-06:00")
def test_embedded_parsing():
result = parse_fixture("embedded.html")
assert_equal(
result["items"][0]["properties"]["content"][0]["html"],
'\n <p>Blah blah blah blah blah.</p>\n' +
' <p>Blah.</p>\n <p>Blah blah blah.</p>\n ')
assert_equal(
result["items"][0]["properties"]["content"][0]["value"],
'\n Blah blah blah blah blah.\n Blah.\n Blah blah blah.\n ')
def test_backcompat():
result = parse_fixture("backcompat.html")
assert_true('h-entry' in result['items'][0]['type'])
assert_equal('<NAME>',
result['items'][0]['properties']
['author'][0]['properties']['name'][0])
assert_equal('A Title',
result['items'][0]['properties']['name'][0])
assert_equal('Some Content',
result['items'][0]['properties']['content'][0]['value'])
def test_hoisting_nested_hcard():
result = parse_fixture("nested_hcards.html")
expected = {
'items': [
{
'properties': {
'author': [
{
'properties': {'name': ['KP1']},
'type': ['h-card'],
'value': 'KP1'
}
],
'in-reply-to': [
{
'properties': {'name': ['KP']},
'type': ['h-cite'],
'value': 'KP'
}
],
'name': ['KP\n KP1']
},
'type': ['h-entry']
}
],
'rels': {},
'rel-urls': {}
}
assert_equal(['KP\n KP1'], result['items'][0]['properties']['name'])
assert_equal(expected, result)
def test_html_tag_class():
result = parse_fixture("hfeed_on_html_tag.html")
assert_equal(['h-feed'], result['items'][0]['type'])
assert_equal(['entry1'], result['items'][0]['children'][0]
['properties']['name'])
assert_equal(['entry2'], result['items'][0]['children'][1]
['properties']['name'])
def test_string_strip():
result = parse_fixture("string_stripping.html")
assert result["items"][0]["properties"]["name"][0] == "<NAME>"
def test_template_parse():
result = parse_fixture("template_tag.html")
assert len(result["items"]) == 0
def test_backcompat_hproduct():
result = parse_fixture("backcompat_hproduct.html")
assert len(result["items"]) == 1
assert result["items"][0]["type"] == ["h-product"]
assert result["items"][0]["properties"]["category"] == ['bullshit']
expect1 = ['Quacktastic Products']
assert result["items"][0]["properties"]["brand"] == expect1
assert result["items"][0]["properties"]["identifier"] == ['#BULLSHIT-001']
expect2 = "Magical tasty sugar pills that don't do anything."
assert result["items"][0]["properties"]['description'][0] == expect2
expect3 = ["Tom's Magical Quack Tincture"]
assert result["items"][0]["properties"]["name"] == expect3
def test_backcompat_hproduct_nested_hreview():
result = parse_fixture("backcompat_hproduct_hreview_nested.html")
assert result["items"][0]["children"][0]['type'] == ['h-review']
assert type(result["items"][0]["children"][0]
['properties']['name'][0]) == text_type
def test_backcompat_rel_bookmark():
"""Confirm that rel=bookmark inside of an h-entry is converted
to u-url.
"""
result = parse_fixture('backcompat_feed_with_rel_bookmark.html')
for ii, url in enumerate((
'/2014/11/24/jump-rope',
'/2014/11/23/graffiti',
'/2014/11/21/earth',
'/2014/11/19/labor',
)):
assert result['items'][ii]['type'] == ['h-entry']
assert result['items'][ii]['properties']['url'] == [url]
def test_backcompat_rel_tag():
"""Confirm that rel=tag inside of an h-entry is converted
to a p-category and the last path segment of the href is used.
"""
result = parse_fixture('backcompat_hentry_with_rel_tag.html')
assert result['items'][0]['properties']['category'] == ['cat', 'dog',
'mountain lion']
def test_area_uparsing():
result = parse_fixture("area.html")
assert result["items"][0]["properties"] == {
'url': ['http://suda.co.uk'], 'name': ['<NAME>']}
assert 'shape' in result["items"][0].keys()
assert 'coords' in result["items"][0].keys()
def test_src_equiv():
result = parse_fixture("test_src_equiv.html")
for item in result["items"]:
assert 'x-example' in item['properties'].keys()
assert 'http://example.org/' == item['properties']['x-example'][0]
def test_rels():
result = parse_fixture("rel.html")
assert result['rels'] == {
u'in-reply-to': [u'http://example.com/1', u'http://example.com/2'],
u'author': [u'http://example.com/a', u'http://example.com/b'],
u'alternate': [u'http://example.com/fr'],
u'home': [u'http://example.com/fr'],
}
assert result['rel-urls'] == {
u'http://example.com/1': {'text': u"post 1", "rels": [u'in-reply-to']},
u'http://example.com/2': {'text': u"post 2", "rels": [u'in-reply-to']},
u'http://example.com/a': {'text': u"author a", "rels": [u'author']},
u'http://example.com/b': {'text': u"author b", "rels": [u'author']},
u'http://example.com/fr': {'text': u'French mobile homepage',
u'media': u'handheld',
u'rels': [u'alternate', u'home'],
u'hreflang': u'fr'}
}
def test_alternates():
result = parse_fixture("rel.html")
assert result['alternates'] == [{
'url': 'http://example.com/fr', 'media': 'handheld',
'text': 'French mobile homepage',
'rel': 'home', 'hreflang': 'fr'
}]
def test_enclosures():
result = parse_fixture("rel_enclosure.html")
assert result['rels'] == {'enclosure': ['http://example.com/movie.mp4']}
assert result['rel-urls'] == {'http://example.com/movie.mp4': {
'rels': ['enclosure'],
'text': 'my movie',
'type': 'video/mpeg'}
}
def test_empty_href():
result = parse_fixture("hcard_with_empty_url.html", "http://foo.com")
for hcard in result['items']:
assert hcard['properties'].get('url') == ['http://foo.com']
def test_link_with_u_url():
result = parse_fixture("link_with_u-url.html", "http://foo.com")
assert_equal({
"type": ["h-card"],
"properties": {
"name": [""],
"url": ["http://foo.com/"],
},
}, result["items"][0])
def test_complex_e_content():
"""When parsing h-* e-* properties, we should fold {"value":..., "html":...}
into the parsed microformat object, instead of nesting it under an
unnecessary second layer of "value":
"""
result = Parser(doc="""<!DOCTYPE html><div class="h-entry">
<div class="h-card e-content"><p>Hello</p></div></div>""").to_dict()
assert_equal({
"type": ["h-entry"],
"properties": {
"content": [{
"type": [
"h-card"
],
"properties": {
"name": ["Hello"]
},
"html": "<p>Hello</p>",
"value": "Hello"
}],
"name": ["Hello"]
}
}, result["items"][0])
def test_nested_values():
"""When parsing nested microformats, check that value is the value of
the simple property element"""
result = parse_fixture("nested_values.html")
entry = result["items"][0]
assert_equal({
'properties': {
'name': ['Kyle'],
'url': ['http://about.me/kyle'],
},
'value': 'Kyle',
'type': ['h-card'],
}, entry["properties"]["author"][0])
assert_equal({
'properties': {
'name': ['foobar'],
'url': ['http://example.com/foobar'],
},
'value': 'http://example.com/foobar',
'type': ['h-cite'],
}, entry["properties"]["like-of"][0])
assert_equal({
'properties': {
'name': ['George'],
'url': ['http://people.com/george'],
},
'type': ['h-card'],
}, entry["children"][0])
def test_implied_name_empty_alt():
"""An empty alt text should not prevent us from including other
children in the implied name.
"""
p = Parser(doc="""
<a class="h-card" href="https://twitter.com/kylewmahan">
<img src="https://example.org/test.jpg" alt="">
@kylewmahan
</a>""").to_dict()
hcard = p['items'][0]
assert_equal({
'type': ['h-card'],
'properties': {
'name': ['@kylewmahan'],
'url': ['https://twitter.com/kylewmahan'],
'photo': ['https://example.org/test.jpg'],
},
}, hcard)
def test_implied_properties_silo_pub():
result = parse_fixture('silopub.html')
item = result['items'][0]
implied_name = item['properties']['name'][0]
    implied_name = re.sub(r'\s+', ' ', implied_name).strip()
assert_equal('@kylewmahan on Twitter', implied_name)
def test_relative_datetime():
result = parse_fixture("implied_relative_datetimes.html")
assert_equal('2015-01-02T05:06:00',
result[u'items'][0][u'properties'][u'updated'][0])
def assert_unicode_everywhere(obj):
if isinstance(obj, dict):
for k, v in obj.items():
assert_false(isinstance(k, binary_type),
'key=%r; type=%r' % (k, type(k)))
assert_unicode_everywhere(v)
elif isinstance(obj, list):
for v in obj:
assert_unicode_everywhere(v)
assert_false(isinstance(obj, binary_type),
'value=%r; type=%r' % (obj, type(obj)))
def check_unicode(filename, jsonblob):
assert_unicode_everywhere(jsonblob)
def test_unicode_everywhere():
for h in os.listdir("test/examples"):
result = parse_fixture(h)
yield check_unicode, h, result
```
|
{
"source": "jeroanan/Gyroscope",
"score": 2
}
|
#### File: Gyroscope/Config/Settings.py
```python
def __get_setting(site_config, app_config, key, default_value):
return site_config.get(key, app_config.get(key, default_value))
def get_logfile_location(app_config):
return app_config.get("logfile_location", "")
def get_log_level(app_config):
    return app_config.get("log_level", 20)
def should_get_pages(site_config, app_config):
return __get_setting(site_config, app_config, "get_pages", True)
def should_log_too_big(site_config, app_config):
return __get_setting(site_config, app_config, "log_too_big", True)
def should_log_too_slow(site_config, app_config):
return __get_setting(site_config, app_config, "log_too_slow", True)
```
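A tiny sketch of the override order encoded by `__get_setting` above: a per-site value wins over the application config, which wins over the built-in default. The dicts are invented for illustration.

```python
from Config import Settings

app_config = {"get_pages": False, "log_too_slow": True}
site_config = {"get_pages": True}

print(Settings.should_get_pages(site_config, app_config))     # True (site overrides app)
print(Settings.should_log_too_slow(site_config, app_config))  # True (falls back to app config)
print(Settings.should_log_too_big(site_config, app_config))   # True (built-in default)
```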
#### File: jeroanan/Gyroscope/GetSite.py
```python
def get_site(site, config):
import Request
import GetAssets
import GetPage
from Config import Settings
from Uri import UriBuilder
from functools import partial
http_request = Request.get_request(site["uri"], site, config, "index")
GetAssets.get_assets(http_request.data, site, UriBuilder.join_uri(site["uri"], ""), config)
if Settings.should_get_pages(site, config):
list(map(partial(GetPage.request_page, site=site, config=config), site.get("pages", [])))
```
#### File: jeroanan/Gyroscope/gyroscope.py
```python
from collections import ChainMap
import logging
import time
import sys
import GetArgs
import GetSite
from Config import Defaults
from Config import LoadSites
from Config import LoadConfig
from Config import Settings
def work():
def init_config():
args = GetArgs.get_args()
return ChainMap(args, LoadConfig.load_config(args.get("config"), args.get("no_config")), Defaults.get_defaults())
def init_logging():
logfile_location = Settings.get_logfile_location(config)
if logfile_location == "" or config.get("no_logfile", False):
logging.basicConfig(level=config["log_level"], format="%(asctime)s %(message)s")
else:
logging.basicConfig(filename=logfile_location, level=config["log_level"],
filemode=config["logfile_mode"], format="%(asctime)s %(message)s")
def get_site(site):
def site_disabled():
return site.get("disabled", False)
if not site_disabled():
GetSite.get_site(site, config)
config = init_config()
init_logging()
logging.info("Start")
start_time = time.time()
list(map(get_site, LoadSites.load_sites(config["sites_file"])))
logging.info("End (total time: %d seconds)" % (time.time() - start_time))
try:
work()
except KeyboardInterrupt:
logging.shutdown()
sys.exit(0)
```
#### File: Gyroscope/HttpStatuses/LogStatus.py
```python
import logging
from HttpStatuses import Status200
def log_status(http_request, uri, page_description, time_elapsed, site, config):
def log_400():
logging.error("Bad request: %s" % uri)
def log_403():
logging.error("Access denied: %s" % uri)
def log_404():
logging.error("Missing page: %s" % uri)
def log_200():
Status200.log_ok_status(uri, page_description, http_request.tell() / 1024, time_elapsed, site, config)
def log_500():
logging.critical("Error: %s" % uri)
def log_default():
logging.warning("%s (%s): %s" % (uri, page_description, http_request.status))
status_methods = {
400: log_400,
403: log_403,
404: log_404,
200: log_200,
500: log_500
}
status_methods.get(http_request.status, log_default)()
```
|
{
"source": "jerocobo/LegalStream",
"score": 4
}
|
#### File: jerocobo/LegalStream/epg.py
```python
import datetime
import string
import math
def ToDay():
global year
year = datetime.datetime.now().year
global month
month = '%02d' % datetime.datetime.now().month
global day
day = '%02d' % datetime.datetime.today().day
global hour
hour = '%02d' % datetime.datetime.now().hour
global minute
minute = '%02d' % datetime.datetime.now().minute
global second
second = '%02d' % datetime.datetime.now().second
global numbers
numbers = str(year) + str(month) + str(day) + str(day) + str(hour) + str(second) + "00"
ToDay()
StartYear = int(year)
StartMonth = int(month)
StartDay = int(day)
StartHour = int(hour)
StartMinute = int(minute)
StartSecond = int(second)
ToDay()
EndYear = int(year)
EndMonth = int(month)
EndDay = int(day)
EndHour = int(hour)
EndMinute = int(minute)
EndSecond = int(second)
MinuteLength = EndMinute - StartMinute
SecondLength = EndSecond - StartSecond
def DoubleDigit(Integer):
return "%02d"%Integer
def PlusOneDay():
global year
year = int(year)
global month
month = int(month)
global day
day = int(day)
global hour
hour = int(hour)
global minute
minute = int(minute)
global second
second = int(second)
global numbers
    if day == 30:
day = DoubleDigit(0)
else:
day = DoubleDigit(day + 1)
month = DoubleDigit(month)
numbers = str(year) + str(month) + str(day) + str(day) + str(hour) + str(second) + "00"
def RetPlusOneDay():
global year
year = int(year)
global month
month = int(month)
global day
day = int(day)
global hour
hour = int(hour)
global minute
minute = int(minute)
global second
second = int(second)
global numbers
    if day == 30:
day = DoubleDigit(0)
else:
day = DoubleDigit(day + 1)
month = DoubleDigit(month)
return str(year) + str(month) + str(day) + str(day) + str(hour) + str(second) + "00"
def RetPlusOneHour():
global year
year = int(year)
global month
month = int(month)
global day
day = int(day)
global hour
hour = int(hour)
    if hour == 23:
hour = DoubleDigit(0)
else:
hour = hour + 1
hour = DoubleDigit(hour)
global minute
minute = int(minute)
global second
second = int(second)
global numbers
    if day == 30:
day = DoubleDigit(0)
else:
day = DoubleDigit(day + 1)
    if month == 11:
month = DoubleDigit(0)
year = year + 1
else:
month = DoubleDigit(month + 1)
return str(year) + str(month) + str(day) + str(day) + str(hour) + str(second) + "00"
print RetPlusOneHour()
DoubleDigit(8)
prompt = raw_input("Are you sure you want to run this program? Avg. run time: 1m 25s.")
affirmative = ("yes", "Yes", "y", "Y", "yeah", "Yeah", "ok", "OK", "okay", "Okay")
if not any(word in prompt for word in affirmative):
    exit()
def ABC1():
ToDay()
global year
year = int(year)
global month
month = int(month)
global day
day = int(day)
global hour
hour = int(hour)
global minute
minute = int(minute)
global second
second = int(second)
i = 0
Program = []
for i in range(0, 365):
        for _ in range(24):
            Program.append('<programme start="' + str(year) + str(month) + str(day) + str(hour) + str(second) + '00 -0400" stop="' + RetPlusOneHour() + '00 -0400" channel="ABCN1"><title lang="en">Now on ABC News</title><category lang="en">News</category></programme>')
i = i + 1
print str(round(float(i)/365*100, 1)) + "% of ABC News Digital 1 Schedule Complete."
return Program
ABC1 = ABC1()
def ABC2():
ToDay()
global year
year = int(year)
global month
month = int(month)
global day
day = int(day)
global hour
hour = int(hour)
global minute
minute = int(minute)
global second
second = int(second)
i = 0
Program = []
for i in range(0, 365):
Program.append('<programme start="' + str(year) + str(month) + str(day) + str(day) + str(hour) + str(second) + '00 -0400" stop="' + RetPlusOneDay() + '00 -0400" channel="ABCN2"><title lang="en">Regularly Scheduled Programming</title><category lang="en">News</category></programme>')
i = i + 1
print str(round(float(i)/365*100, 1)) + "% of ABC News Digital 2 Schedule Complete."
return Program
ABC2 = ABC2()
def ABC3():
ToDay()
global year
year = int(year)
global month
month = int(month)
global day
day = int(day)
global hour
hour = int(hour)
global minute
minute = int(minute)
global second
second = int(second)
i = 0
Program = []
for i in range(0, 365):
Program.append('<programme start="' + str(year) + str(month) + str(day) + str(hour) + str(second) + '00 -0400" stop="' + RetPlusOneDay() + '00 -0400" channel="ABCN3"><title lang="en">Regularly Scheduled Programming</title><category lang="en">News</category></programme>')
i = i + 1
print str(round(float(i)/365*100, 1)) + "% of ABC News Digital 3 Schedule Complete."
return Program
ABC3 = ABC3()
def ABC4():
ToDay()
global year
year = int(year)
global month
month = int(month)
global day
day = int(day)
global hour
hour = int(hour)
global minute
minute = int(minute)
global second
second = int(second)
i = 0
Program = []
for i in range(0, 365):
Program.append('<programme start="' + str(year) + str(month) + str(day) + str(hour) + str(second) + '00 -0400" stop="' + RetPlusOneDay() + '00 -0400" channel="ABCN4"><title lang="en">Regularly Scheduled Programming</title><category lang="en">News</category></programme>')
i = i + 1
print str(round(float(i)/365*100, 1)) + "% of ABC News Digital 4 Schedule Complete."
return Program
ABC4 = ABC4()
def ABC5():
ToDay()
global year
year = int(year)
global month
month = int(month)
global day
day = int(day)
global hour
hour = int(hour)
global minute
minute = int(minute)
global second
second = int(second)
i = 0
Program = []
for i in range(0, 365):
Program.append('<programme start="' + str(year) + str(month) + str(day) + str(hour) + str(second) + '00 -0400" stop="' + RetPlusOneDay() + '00 -0400" channel="ABCN5"><title lang="en">Regularly Scheduled Programming</title><category lang="en">News</category></programme>')
i = i + 1
print str(round(float(i)/365*100, 1)) + "% of ABC News Digital 5 Schedule Complete."
return Program
#File = open('workfile', 'w')
Filee = '<?xml version="1.0" encoding="utf-8" ?><!DOCTYPE tv SYSTEM "http://www.teleguide.info/download/xmltv.dtd"><tv generator-info-name="LegalStream Python EPG Generator" generator-info-url="https://github.com/notanewbie/LegalStream/blob/master/epg.py"><channel id="300093"><display-name lang="en">France 24</display-name></channel><channel id="ABCN1"><display-name lang="en">ABC News Digital 1</display-name></channel><channel id="ABCN2"><display-name lang="en">ABC News Digital 2</display-name></channel><channel id="ABCN3"><display-name lang="en">ABC News Digital 3</display-name></channel><channel id="ABCN4"><display-name lang="en">ABC News Digital 4</display-name></channel><channel id="ABCN5"><display-name lang="en">ABC News Digital 5</display-name></channel>'
i = 0
for object in ABC1:
Filee = Filee + ABC1[i]
i = i + 1
i = 0
for object in ABC2:
Filee = Filee + ABC2[i]
i = i + 1
i = 0
for object in ABC3:
Filee = Filee + ABC3[i]
i = i + 1
i = 0
for object in ABC4:
Filee = Filee + ABC4[i]
i = i + 1
file_ = open('output.xml', 'w')
file_.write(Filee + "</tv>")
file_.close()
ToDay()
EndYear = int(year)
EndMonth = int(month)
EndDay = int(day)
EndHour = int(hour)
EndMinute = int(minute)
EndSecond = int(second)
MinuteLength = EndMinute - StartMinute
SecondLength = EndSecond - StartSecond
print "Generating EPG data took " + str(MinuteLength) + "m and " + str(SecondLength) + "s."
```
|
{
"source": "jerod2000/pytest",
"score": 3
}
|
#### File: jerod2000/pytest/test.py
```python
import os
import shutil
#搜索apk文件并复制到目标目录
def checkApkFile (srcPath,destPath):
files = os.listdir(srcPath)
for file in files:
if file.lower().endswith(".apk"):
oldFile=srcPath+"\\"+file
newFile=destPath+"\\"+file
if os.path.exists(newFile):
os.remove(newFile)
print("copy " + file,end=" ")
shutil.copyfile(oldFile,newFile)
print("success")
# Search for a release directory and check it for apk files
def checkReleaseDir (srcPath,destPath):
    files = os.listdir(srcPath)
    isTargetDir=False
    for file in files:
        if "release" == file.lower():
            releaseDir=srcPath+"\\"+file
            checkApkFile(releaseDir,destPath)
            isTargetDir=True
    if isTargetDir:# remove the directory
        shutil.rmtree(srcPath,True)
# Get the current working directory
curPath=os.getcwd()
# Print the current working directory
print(curPath)
# Create the Release output directory
releasePath=curPath+"\\Release"
if not os.path.exists(releasePath):
    os.mkdir(releasePath)
# Collect all files and directories in the current directory
files=os.listdir(curPath)
# Iterate over them
for f1 in files:
    filePath = curPath + "\\" + f1
    if os.path.isdir(filePath):# If it is a directory, check whether it contains a Release directory
checkReleaseDir(filePath,releasePath)
print("end")
```
|
{
"source": "jerodray/Checkers",
"score": 3
}
|
#### File: jerodray/Checkers/SingleMove.py
```python
import model
import view
class SingleMove:
def __init__(self, friendly, friendly_color, enemy, enemy_color):
self.board = model.board
self.checkers = model.checkers
self.buildGame(friendly, friendly_color, enemy, enemy_color)
self.view(view.win1)
view.runAI(True)
# print("(click once on the left checkerboard to close the application)")
# click1 = view.win1.getMouse()
input("Press Enter to close application...")
def buildGame(self, friendly, friendly_color, enemy, enemy_color):
# Init Board ("Pieces" = Squares on the board)
for x in range(0, 8):
for y in range(0, 8):
self.board[x, y] = model.Piece(x * 62.5, y * 62.5)
# Set checker color correctly
view.color_ai = friendly_color
view.color_opponent = enemy_color
# Place Checkers ("Checkers" = Circular Plastic Guys)
for f in friendly:
self.addChecker(f[0], f[1], f[2], True)
for e in enemy:
self.addChecker(e[0], e[1], e[2], False)
def view(self, window):
view.drawBoard(window)
view.drawCheckers(window)
def addChecker(self, x, y, is_king, is_friendly):
checker = model.Checker()
checker.id = (x, y)
checker.index = x * 8 + (y + 1)
checker.black = is_friendly
checker.x = x
checker.y = y
checker.king = is_king
self.board[x, y].checker = checker
self.checkers.append(checker)
```
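A hedged sketch of constructing a single-move scenario with the class above. Each checker is an `(x, y, is_king)` tuple, matching how `addChecker` consumes `f[0], f[1], f[2]`; the colour strings are placeholders for whatever the `view` module expects.

```python
from SingleMove import SingleMove

friendly = [(2, 5, False), (4, 5, True)]     # (x, y, is_king)
enemy = [(3, 4, False)]
SingleMove(friendly, "black", enemy, "red")  # draws the board and runs the AI once
```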
|
{
"source": "Jeroen0494/docker-suricata",
"score": 2
}
|
#### File: Jeroen0494/docker-suricata/wrapper.py
```python
import sys
import os
import argparse
DEFAULT_IMAGE = "jasonish/suricata"
DEFAULT_TAG = "latest"
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--image", default=DEFAULT_IMAGE,
help="Docker image (default: {})".format(DEFAULT_IMAGE))
parser.add_argument(
"--tag", default=DEFAULT_TAG,
help="Docker image tag (default: {})".format(DEFAULT_TAG))
parser.add_argument(
"--net", default="host", help="Docker networking type (default: host)")
parser.add_argument(
"remainder", nargs=argparse.REMAINDER, help=argparse.SUPPRESS)
parser.add_argument(
"--podman", action="store_true", default=False, help="Use podman")
parser.add_argument(
"-e", "--env", action="append", default=[], help="Set environment variable")
args = parser.parse_args()
runner = "docker"
if args.podman:
runner = "podman"
volumes = []
user_mode = False
log_dir = None
suricata_args = []
while args.remainder:
arg = args.remainder.pop(0)
if arg == "--":
continue
elif arg == "-S":
v = args.remainder.pop(0)
volumes += [
"-v", "{}:{}".format(v, v)
]
suricata_args += [arg, v]
elif arg == "-r":
v = args.remainder.pop(0)
volumes += [
"-v", "{}:{}".format(v, v)
]
suricata_args += [arg, v]
user_mode = True
elif arg == "-l":
v = args.remainder.pop(0)
if not v.startswith("/"):
v = "{}/{}".format(os.getcwd(), v)
volumes += [
"-v", "{}:/var/log/suricata".format(v)
]
suricata_args += ["-l", "/var/log/suricata"]
log_dir = v
else:
suricata_args += [arg]
docker_args = [
runner,
"run",
"--net", args.net,
"--rm",
"-it",
"--cap-add", "sys_nice",
"--cap-add", "net_admin",
"-e", "PUID={}".format(getuid()),
"-e", "PGID={}".format(getgid()),
]
for e in args.env:
docker_args += ["-e", e]
if user_mode and log_dir is None:
volumes += ["-v", "{}:/work".format(os.getcwd())]
docker_args += ["-w", "/work"]
docker_args += volumes
    docker_args += [
        "{}:{}".format(args.image, args.tag)
    ]
docker_args += suricata_args
print(" ".join(docker_args))
os.execvp(docker_args[0], docker_args)
def getuid():
if os.getenv("SUDO_UID") != None:
return os.getenv("SUDO_UID")
return os.getuid()
def getgid():
if os.getenv("SUDO_GID") != None:
return os.getenv("SUDO_GID")
return os.getgid()
if __name__ == "__main__":
sys.exit(main())
```
|
{
"source": "jeroen11dijk/Choreography",
"score": 3
}
|
#### File: choreography/choreos/test_choreo.py
```python
import cmath
import math
from dataclasses import dataclass
from typing import List
import numpy
from rlbot.utils.structures.game_data_struct import GameTickPacket
from rlbot.utils.structures.game_interface import GameInterface
from rlutilities.linear_algebra import vec3, rotation, dot, vec2, look_at, mat3, norm, normalize, \
xy, axis_to_rotation, euler_to_rotation
from rlutilities.simulation import Input
from choreography.choreography_main import Choreography
from choreography.drone import Drone
from choreography.group_step import BlindBehaviorStep, DroneListStep, PerDroneStep, \
StateSettingStep, TwoTickStateSetStep
from choreography.utils.img_to_shape import convert_img_to_shape
from .examples import YeetTheBallOutOfTheUniverse, FormACircle, Wait, FlyUp
# HEX FLIP COOL CLIP
class HexDoubleFlip(Choreography):
@staticmethod
def get_num_bots():
return 6
def __init__(self, game_interface: GameInterface):
super().__init__(game_interface)
def generate_sequence(self):
self.sequence = [
YeetTheBallOutOfTheUniverse(),
HexSetup(),
BoostUntilFast(),
BackflipBoostyThing()
]
class HexSetup(StateSettingStep):
radius = 300
center = vec3(-2000, 0, 100)
def set_drone_states(self, drones: List[Drone]):
for i, drone in enumerate(drones):
angle = i * math.pi * 2 / len(drones)
rot = rotation(angle)
v = vec3(dot(rot, vec2(1, 0)))
drone.position = v * self.radius + self.center
drone.orientation = look_at(vec3(2, 0, 3), vec3(1, 0, 0))
drone.velocity = vec3(0, 0, 500)
drone.angular_velocity = vec3(0, 0, 0)
class BoostUntilFast(DroneListStep):
def step(self, packet: GameTickPacket, drones: List[Drone]):
self.finished = norm(drones[0].velocity) > 1000
for drone in drones:
drone.controls.pitch = 0
drone.controls.boost = True
class BackflipBoostyThing(BlindBehaviorStep):
duration = 6.0
def set_controls(self, controls: Input):
controls.pitch = 0.5
controls.boost = True
# AUTOMATIC STATE SETTING INTO DRAWING
class Dickbutt(Choreography):
def __init__(self, game_interface: GameInterface):
super().__init__(game_interface)
def generate_sequence(self):
self.sequence = [
YeetTheBallOutOfTheUniverse(),
Drawing('ChoreographyHive/assets/dickbutt.png', origin=vec3(-1000, 1500, 18)),
Wait(1.0)
]
class Drawing(TwoTickStateSetStep):
def __init__(self, image, origin=vec3(0, 0, 18)):
super().__init__()
self.origin = origin
self.shape = convert_img_to_shape(image)
def set_drone_states(self, drones: List[Drone]):
for i, drone in enumerate(drones):
if i < len(self.shape):
drone.position = self.origin + self.shape[i]
drone.orientation = mat3(1, 0, 0, 0, 1, 0, 0, 0, 1)
drone.velocity = vec3(0, 0, 0)
else:
drone.position = vec3(0, 0, 3000)
# CIRCLES AND SPHERE FORMATION TESTS
class CirclesAndSpheres(Choreography):
@staticmethod
def get_num_bots():
return 45
def __init__(self, game_interface: GameInterface):
super().__init__(game_interface)
def generate_sequence(self):
self.sequence = [
YeetTheBallOutOfTheUniverse(),
FormACircle(),
Wait(1.0),
FlyUp(),
HoverSpinUp(),
HoverSpinDown(),
SphereFormation(),
HoverOrbit()
]
class HoverSpinUp(PerDroneStep):
duration = 6.0
def step(self, packet: GameTickPacket, drone: Drone, index: int):
drone.hover.up = normalize(drone.position)
clockwise_rotation = axis_to_rotation(vec3(0, 0, self.time_since_start / 4))
position_on_circle = normalize(xy(drone.position)) * (2000 - self.time_since_start * 200)
drone.hover.target = dot(clockwise_rotation, position_on_circle)
drone.hover.target[2] = 1000
drone.hover.step(self.dt)
drone.controls = drone.hover.controls
class HoverSpinDown(PerDroneStep):
duration = 6.0
def step(self, packet: GameTickPacket, drone: Drone, index: int):
drone.hover.up = normalize(drone.position)
clockwise_rotation = axis_to_rotation(vec3(0, 0, 1.5 - self.time_since_start / 4))
position_on_circle = normalize(xy(drone.position)) * (800 + self.time_since_start * 200)
drone.hover.target = dot(clockwise_rotation, position_on_circle)
drone.hover.target[2] = 1000
drone.hover.step(self.dt)
drone.controls = drone.hover.controls
class SphereFormation(DroneListStep):
duration = 12.0
separation_duration = 3.0
recirculation_start = 6.5
radius_shrink_start = 3.0
radius_shrink_duration = 6.0
layers = [
[0, 16],
[1, 2, 17, 18, 32, 33],
[3, 4, 5, 19, 20, 21, 34, 35, 36],
[6, 7, 8, 9, 22, 23, 24, 25, 37, 38, 39, 40],
[10, 11, 12, 26, 27, 28, 41, 42, 43],
[13, 14, 29, 30, 44, 45],
[15, 31]
]
heights = [
1500,
1400,
1250,
1000,
750,
600,
500,
]
radii = [
200,
450,
600,
650,
600,
450,
200,
]
def step(self, packet: GameTickPacket, drones: List[Drone]):
for drone in drones:
drone.hover.up = normalize(drone.position)
for i, layer in enumerate(self.layers):
if drone.id in layer:
# Calculate radius
if self.time_since_start < self.radius_shrink_start:
radius = 2000
elif self.time_since_start < self.radius_shrink_start + self.radius_shrink_duration:
diff = 2000 - self.radii[i]
radius = 2000 - diff * (
(self.time_since_start - self.radius_shrink_start) / self.radius_shrink_duration)
else:
radius = self.radii[i]
# Calculate xy position
if self.time_since_start > self.recirculation_start:
a = layer.index(drone.id)
angle = a * math.pi * 2 / len(layer)
rot = rotation(angle)
pos_xy = vec3(dot(rot, vec2(1, 0)))
else:
pos_xy = xy(drone.position)
# Combine xy and radius
drone.hover.target = normalize(pos_xy) * radius
# Get height
if self.time_since_start < self.separation_duration:
diff = 1000 - self.heights[i]
height = 1000 - diff * (self.time_since_start / self.separation_duration)
else:
height = self.heights[i]
drone.hover.target[2] = height
break
drone.hover.step(self.dt)
drone.controls = drone.hover.controls
class HoverOrbit(PerDroneStep):
duration = 8.0
layers = [
[0, 16],
[1, 2, 17, 18, 32, 33],
[3, 4, 5, 19, 20, 21, 34, 35, 36],
[6, 7, 8, 9, 22, 23, 24, 25, 37, 38, 39, 40],
[10, 11, 12, 26, 27, 28, 41, 42, 43],
[13, 14, 29, 30, 44, 45],
[15, 31]
]
heights = [
1500,
1400,
1250,
1000,
750,
600,
500,
]
radii = [
200,
450,
600,
650,
600,
450,
200,
]
def step(self, packet: GameTickPacket, drone: Drone, index: int):
for i, layer in enumerate(self.layers):
if index in layer:
drone.hover.up = normalize(drone.position)
clockwise_rotation = axis_to_rotation(vec3(0, 0, 0.3))
position_on_circle = normalize(xy(drone.position)) * self.radii[i]
drone.hover.target = dot(clockwise_rotation, position_on_circle)
drone.hover.target[2] = self.heights[i]
break
drone.hover.step(self.dt)
drone.controls = drone.hover.controls
# DOUBLE HELIX
class DoubleHelix(Choreography):
@staticmethod
def get_appearances(num_bots: int) -> List[str]:
appearances = ['WillRedBlue.cfg'] * num_bots
# appearances[0::4] = ['WillYellowGreen.cfg'] * round(num_bots / 4)
# appearances[1::4] = ['WillYellowGreen.cfg'] * round(num_bots / 4)
return appearances
@staticmethod
def get_teams(num_bots: int) -> List[int]:
# Every other bot is on the orange team.
teams = [0] * num_bots
teams[1::2] = [1] * round(num_bots / 2)
return teams
@staticmethod
def get_num_bots():
return 32
def __init__(self, game_interface: GameInterface):
super().__init__(game_interface)
def generate_sequence(self):
self.sequence = [
YeetTheBallOutOfTheUniverse(),
TwoLineSetup(),
Wait(1.0),
ForwardThenHelix()
]
class TwoLineSetup(StateSettingStep):
y_distance = 500
x_distance = 300
gap_offset = 300
def set_drone_states(self, drones: List[Drone]):
for i, drone in enumerate(drones):
angle = (-1) ** i * -math.pi / 2
x = -self.x_distance * (-1) ** i
y = (self.y_distance + self.gap_offset * (i // 2)) * (-1) ** i
drone.position = vec3(x, y, 20)
drone.orientation = euler_to_rotation(vec3(0, angle, 0))
drone.velocity = vec3(0, 0, 0)
drone.angular_velocity = vec3(0, 0, 0)
class ForwardThenHelix(PerDroneStep):
duration = 13.0
radius = 500
def step(self, packet: GameTickPacket, drone: Drone, index: int):
if drone.position[2] < 25:
drone.since_jumped = 0.0
# Go forward
drone.controls.throttle = 1.0 if abs(drone.velocity[1]) < 500 else 0.01
# If near half-line
if abs(drone.position[1]) < 200:
drone.controls.jump = True
else:
drone.since_jumped += self.dt
height = 50 + drone.since_jumped * 150
angle = 1.0 + drone.since_jumped * 1.2
if index % 2 == 0: angle += math.pi
rot = rotation(angle)
v = vec3(dot(rot, vec2(1, 0)))
drone.hover.target = v * self.radius
drone.hover.target[2] = height
drone.hover.up = normalize(drone.position)
drone.hover.step(self.dt)
drone.controls = drone.hover.controls
# F(X,Y) GRAPHER
class GraphTest(Choreography):
@staticmethod
def get_num_bots():
return 64
@staticmethod
def get_appearances(num_bots: int) -> List[str]:
return 64 * ['graph.cfg']
# @staticmethod
# def get_teams(num_bots: int) -> List[int]:
# teams = [0] * num_bots
# return teams
def __init__(self, game_interface: GameInterface):
super().__init__(game_interface)
def generate_sequence(self):
self.sequence = [
YeetTheBallOutOfTheUniverse(),
Grid(),
BaseGraph(),
Wave(),
Water(),
BaseGraph(),
Saddle(),
BaseGraph(),
Pants(),
# Parabola(),
# CosSin(),
# WindMill(),
# YeetEquation(),
# Limit(),
# Jochem(),
# LogarithmReal(),
]
class Grid(TwoTickStateSetStep):
spacing = 200
def set_drone_states(self, drones: List[Drone]):
s = int(math.sqrt(len(drones))) # Side length
for i, drone in enumerate(drones):
# Get grid pos.
x = (i // s) - (s - 1) / 2
y = (i % s) - (s - 1) / 2
drone.position = vec3(x * self.spacing, y * self.spacing, 800) # 800 is base height
drone.orientation = euler_to_rotation(vec3(math.pi / 2, 0, 0))
drone.velocity = vec3(0, 0, 100)
drone.angular_velocity = vec3(0, 0, 0)
class BaseGraph(DroneListStep):
duration = 2
rotation_speed = 0
spacing = 200
def func(self, x, y):
return 0
def step(self, packet: GameTickPacket, drones: List[Drone]):
s = int(math.sqrt(len(drones))) # Side length
for i, drone in enumerate(drones):
# Get grid pos.
x = (i // s) - (s - 1) / 2
y = (i % s) - (s - 1) / 2
# Get height from func.
z = 800 + self.func(x, y) # 800 is base height
drone.hover.target = vec3(x * self.spacing, y * self.spacing, z)
rot = rotation(self.rotation_speed * self.time_since_start * 2)
drone.hover.up = vec3(dot(rot, vec2(1, 0)))
drone.hover.step(self.dt)
drone.controls = drone.hover.controls
class Parabola(BaseGraph):
def func(self, x, y):
return 40 * (x ** 2 + y ** 2) - 200
class CosSin(BaseGraph):
def func(self, x, y):
return 250 * (math.cos(x) + math.sin(y))
class WindMill(BaseGraph):
duration = 4 * math.pi
def func(self, x, y):
t = self.time_since_start
return 1000 * (numpy.sign(x * y) * numpy.sign(1 - (x * 9) ** 2 + (y * 9) ** 2) / 9)
class Wave(BaseGraph):
duration = 2 * math.pi
def func(self, x, y):
t = self.time_since_start
return 150 * (math.sin(x / 2 + 2 * t))
class YeetEquation(BaseGraph):
duration = 5
def func(self, x, y):
t = self.time_since_start
t_0 = 2
c = 0.5
a = 1 / (4 * math.pi * c * (t + t_0))
b = -(x ** 2 + y ** 2) / (4 * c * (t + t_0))
return 20000 * a * math.exp(b)
class Water(BaseGraph):
duration = 2 * math.pi
def func(self, x, y):
t = self.time_since_start
return 250 * (math.sin(x / 2 + t)) * (math.cos(y / 2 + t))
class Saddle(BaseGraph):
duration = 4 * math.pi
def func(self, x, y):
t = self.time_since_start
return 4 * x * y * t * math.cos(t)
class Jochem(BaseGraph):
duration = 4
def func(self, x, y):
t = self.time_since_start
return 300 * t * x / (x ** 2 + y ** 2 + 0.3)
class Limit(BaseGraph):
duration = 4 * math.pi
def func(self, x, y):
t = self.time_since_start
return 10 * t * math.cos(t) * x / (y + 0.001)
class Will(BaseGraph):
duration = 5
def func(self, x, y):
t = self.time_since_start
return 10 * (math.sin(1.5 * t) - 0.5) * (x ** 2 + y ** 2)
class LogarithmReal(BaseGraph):
duration = 4 * math.pi
def func(self, x, y):
t = self.time_since_start
return 200 * math.cos(t) * (cmath.sqrt(x + y * 1j)).real
class Pants(BaseGraph):
duration = 4 * math.pi
def func(self, x, y):
t = self.time_since_start
return 275 * math.sin(t) * (cmath.sqrt(x + y * 1j)).imag
# HARDCODED CLONES
class Clones(Choreography):
@staticmethod
def get_num_bots():
return 10
def __init__(self, game_interface: GameInterface):
super().__init__(game_interface)
def generate_sequence(self):
self.sequence = [
YeetTheBallOutOfTheUniverse(),
StackThemUp(),
GoForwardAndThenDoAJumpOrSomething()
]
class StackThemUp(StateSettingStep):
pos = vec3(0, -2000, 20)
height = 50
def set_drone_states(self, drones: List[Drone]):
for i, drone in enumerate(drones):
drone.position = self.pos
drone.position[2] += i * self.height
drone.orientation = euler_to_rotation(vec3(0, math.pi / 2, 0))
drone.velocity = vec3(0, 0, 0)
drone.angular_velocity = vec3(0, 0, 0)
@dataclass
class MovementInInterval:
start: float
end: float
controls: Input
# Pass in a list of MovementInInterval objects and it automatically completes the moves with each drone.
# If you are tempted to use clone_delay = 0, use BlindBehaviorStep instead.
class HardcodedMovement(PerDroneStep):
def __init__(self, movements: List[MovementInInterval], clone_delay: float = 1.0):
self.movements = movements
self.clone_delay = clone_delay
super().__init__()
def step(self, packet: GameTickPacket, drone: Drone, index: int):
delay = index * self.clone_delay
for movement in self.movements:
if movement.start + delay < self.time_since_start < movement.end + delay:
drone.controls = movement.controls
if index == packet.num_cars - 1:
self.finished = self.time_since_start > delay + self.movements[-1].end
class GoForwardAndThenDoAJumpOrSomething(HardcodedMovement):
def __init__(self):
a = Input()
a.throttle = True
b = Input()
b.jump = True
b.pitch = 1.0
movements = [
MovementInInterval(0.0, 3.0, a),
MovementInInterval(3.0, 4.2, b)
]
super().__init__(movements, clone_delay=0.8)
# Unused cool sphere
class CoolSphere(PerDroneStep):
duration = 30.0
height = 1100
radius = 850
unwind_start_time = 10.0
max_frequency = 30.0
def step(self, packet: GameTickPacket, drone: Drone, index: int):
if self.time_since_start > self.unwind_start_time:
f = self.max_frequency - (self.time_since_start - self.unwind_start_time)
else:
f = self.max_frequency
z = (index - 31.5) / 32 # For 64 bots :^)
x = math.sqrt(1 - z ** 2) * math.cos(z * f)
y = math.sqrt(1 - z ** 2) * math.sin(z * f)
target = vec3(x, y, z) * self.radius
target[2] += self.height
drone.hover.up = normalize(drone.position)
drone.hover.target = target
drone.hover.step(self.dt)
drone.controls = drone.hover.controls
```
#### File: choreography/utils/vector_math.py
```python
from rlutilities.linear_algebra import vec3, norm, normalize
def distance(position: vec3, target: vec3) -> float:
return norm(position - target)
def direction(source: vec3, target: vec3) -> vec3:
return normalize(target - source)
```
|
{
"source": "jeroenbbb/openpilot",
"score": 2
}
|
#### File: selfdrive/can/libdbc_py.py
```python
import os
import subprocess
from selfdrive.swaglog import cloudlog
from cffi import FFI
can_dir = os.path.dirname(os.path.abspath(__file__))
libdbc_fn = os.path.join(can_dir, "libdbc.so")
try:
subprocess.check_call(["make"], cwd=can_dir)
except subprocess.CalledProcessError:
cloudlog.warning("building in can/libdbc_py failed")
ffi = FFI()
ffi.cdef("""
typedef struct {
const char* name;
double value;
} SignalPackValue;
typedef struct {
uint32_t address;
const char* name;
double default_value;
} SignalParseOptions;
typedef struct {
uint32_t address;
int check_frequency;
} MessageParseOptions;
typedef struct {
uint32_t address;
uint16_t ts;
const char* name;
double value;
} SignalValue;
typedef enum {
DEFAULT,
HONDA_CHECKSUM,
HONDA_COUNTER,
TOYOTA_CHECKSUM,
} SignalType;
typedef struct {
const char* name;
int b1, b2, bo;
bool is_signed;
double factor, offset;
SignalType type;
} Signal;
typedef struct {
const char* name;
uint32_t address;
unsigned int size;
size_t num_sigs;
const Signal *sigs;
} Msg;
typedef struct {
const char* name;
uint32_t address;
const char* def_val;
const Signal *sigs;
} Val;
typedef struct {
const char* name;
size_t num_msgs;
const Msg *msgs;
const Val *vals;
size_t num_vals;
} DBC;
void* can_init(int bus, const char* dbc_name,
size_t num_message_options, const MessageParseOptions* message_options,
size_t num_signal_options, const SignalParseOptions* signal_options, bool sendcan,
const char* tcp_addr);
void can_update(void* can, uint64_t sec, bool wait);
size_t can_query(void* can, uint64_t sec, bool *out_can_valid, size_t out_values_size, SignalValue* out_values);
const DBC* dbc_lookup(const char* dbc_name);
void* canpack_init(const char* dbc_name);
uint64_t canpack_pack(void* inst, uint32_t address, size_t num_vals, const SignalPackValue *vals, int counter);
""")
try:
libdbc = ffi.dlopen(libdbc_fn)
except OSError:
cloudlog.warning("load library in can/libdbc_py failed")
```
#### File: selfdrive/loggerd/publish_log.py
```python
import time
import zmq
import sys
import requests
import threading
if __name__ == "__main__":
sys.path.append("/home/pi/openpilot")
import selfdrive.messaging as messaging
import selfdrive.loggerd.telegram as telegram
from selfdrive.services import service_list
from cereal import log
# display all services
for service in service_list:
print (service)
print (service_list[service].port)
# set upload time interval for every message
# name, number of seconds between 2 uploads
upload_interval = {
"gpsLocationExternal": 5,
"navUpdate": 30,
"logMessage": 120,
"health": 300,
"thermal": 30,
"liveMapData": 30
}
# define list for all last uploads
last_upload = {}
# define list to remember the last message so it can be communicated to Telegram
last_message = {}
def upload(msgtype, data):
url = "https://esfahaniran.com/openpilot/index.php"
post_fields = {'type': msgtype, 'data': data}
header = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
try:
# = requests.post(url, data=post_fields, headers=header,timeout=5)
r = requests.post(url, data=post_fields, timeout=5)
# json = urlopen(request).read().decode()
# r = requests.post(url, data={'data': data, 'type': msgtype})
print(r.status_code, r.reason)
print(r.text)
except:
print ("Timeout, no upload")
# check priority, not every message has to be uploaded every time
# and some special fields can be extracted from the message
# see cereal/log.capnp - struct Event for all possible messages
def define_upload_required(evnt):
field1 = ""
field2 = ""
upload_required = False
type = evnt.which()
if type == 'gpsLocationExternal':
# get gps locations
field1 = evnt.gpsLocationExternal.latitude
field2 = evnt.gpsLocationExternal.longitude
# check time in sec since last upload
if type in last_upload:
time_since_last_upload = (evnt.logMonoTime - last_upload[type]) / 1000000000
else:
time_since_last_upload = 1000
# print (time_since_last_upload)
if type in upload_interval:
if upload_interval[type] < time_since_last_upload:
            # enough time has passed since the last upload of this message type,
            # so a new upload is required
print ("Upload required")
upload_required = True
last_upload[type] = evnt.logMonoTime
return upload_required, field1, field2
def convert_message(evt):
    # convert all messages into readable output
which = evt.which()
output = str(evt)
if which == "navUpdate":
hlp = evt.navUpdate.segments
output = str(hlp[0].instruction) + ", distance="
output = output + str(hlp[0].distance) + " meters"
if which == "logMessage":
output = evt.logMessage
if which == "gpsLocationExternal":
lat = str(round(evt.gpsLocationExternal.latitude,5))
lon = str(round(evt.gpsLocationExternal.longitude,5))
speed = str(round(evt.gpsLocationExternal.speed,3))
bearing = str(round(evt.gpsLocationExternal.bearing,0))
accuracy= str(round(evt.gpsLocationExternal.accuracy,0))
time_stamp = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(evt.gpsLocationExternal.timestamp))
output = lat + " " + lon + " " + speed + " m/s " + bearing + " degrees, accuracy in meters=" + accuracy + " " + time_stamp
output = output + " (https://maps.google.com/?q=" + lat
output = output + "," + lon + ")"
return output
def main(gctx=None):
context = zmq.Context()
poller = zmq.Poller()
service_sock = []
count = 0
# start telegram stuff
count = 0
last_update_id = None
print (telegram.get_me())
# loop through all services to define socks
for service in service_list:
print (service)
print (service_list[service].port)
port = service_list[service].port
# service_sock.append(messaging.sub_sock(context, service_list[service].port))
sock = messaging.sub_sock(context, port, poller)
# count = count + 1
# define poller to listen to all sockets
# for i in range(0,count):
# poller.register( service_sock[i], zmq.POLLIN )
# poll all incoming messages
priority = 1
while True:
sock_found = False
polld = poller.poll(timeout=100)
for sock, mode in polld:
sock_found = True
#print (str(sock))
#print (mode)
msg = sock.recv()
# msg = sock.recv_multipart()
# print (str(msg))
# print (msg.decode("ascii"))
evt = log.Event.from_bytes(msg)
print(evt.which())
# remember last message for every message type
last_message[evt.which()] = convert_message(evt)
# check if the message has to be uploaded or not
upload_required, field1, field2 = define_upload_required(evt)
if evt.which() == 'liveMapData':
print(evt)
if priority == 10:
upload(evt.which(), evt)
priority = 0
print (sock_found)
# check if Telegram is asking something
# but only when no messages are waiting
if sock_found == False:
updates = telegram.get_updates(last_update_id)
print (updates)
if len(updates["result"]) > 0:
last_update_id = telegram.get_last_update_id(updates) + 1
telegram.handle_answer(updates, last_message)
time.sleep(2)
print ("Sleep" + str(count))
count = count + 1
# loop through all services to listen to the socks
#while True:
# count = 0
# for service in service_list:
# # read all messages from this socket
# msg = messaging.recv_sock(service_sock[count], wait=False)
# while msg is not None:
# if isinstance(msg, str):
# print (service + "=" + msg)
# else:
# print ("message received from " + service + " " + str(msg))
# #type(msg)
# msg = messaging.recv_sock(service_sock[count], wait=False)
# count = count + 1
# time.sleep(5)
if __name__ == "__main__":
main()
```
|
{
"source": "JeroenBongers96/suii_3d_vision_ros",
"score": 2
}
|
#### File: suii_3d_vision_ros/yolo/yolo_server.py
```python
import cv2
import time
import numpy
#from processing import PostProcessing
from suii_3d_vision_ros.srv import GetRoi, GetRoiResponse
from std_msgs.msg import String
from sensor_msgs.msg import Image
from network import NetworkClient
#from cv_bridge import CvBridge, CvBridgeError
from cv_bridge import CvBridge, CvBridgeError
import rospy
import roslib
#from yolo import Yolo
import struct
import socket
import base64
import json
class Yolo_Wrapper(object):
def __init__(self):
rospy.init_node('get_roi_server')
s = rospy.Service('get_roi', GetRoi, self.handle_get_roi)
print("Server is ready")
self.client = NetworkClient("localhost", 9999)
#data = numpy.load('/home/jeroen/catkin_ws/src/suii_3d_vision_ros/yolo/config/mtx.npz')
#self.mtx = data['mtx']
#data = numpy.load('/home/jeroen/catkin_ws/src/suii_3d_vision_ros/yolo/config/dist.npz')
#self.dist = data['dist']
rospy.spin()
def handle_get_roi(self, req):
cvb_de = CvBridge()
#cv_image = bridge.imgmsg_to_cv2(req, desired_encoding="bgr8")
newimg = cvb_de.imgmsg_to_cv2(req.input, "bgr8")
#image = newimg
#h, w = image.shape[:2]
#newcameramtx, roi=cv2.getOptimalNewCameraMatrix(self.mtx,self.dist,(w,h),1,(w,h))
#dst = cv2.undistort(image, self.mtx, self.dist, None, newcameramtx)
#x,y,w,h = roi
#image = dst[y:y+h, x:x+w]
retval, buff = cv2.imencode('.jpg', newimg)
jpg_enc = base64.b64encode(buff)
# Do no touch, client encoded request
resp = self.client.networkCall(0x00, {"img": jpg_enc})
#End client encoded request
list_of_name = resp['names']
arr_list = []
for x in list_of_name:
#add start int
for y in x:
#add data
arr_list.append(y)
# convert to np array
return GetRoiResponse(arr_list)
if __name__ == "__main__":
wrapper = Yolo_Wrapper()
```
|
{
"source": "JeroenBos/jeroenbos.partest",
"score": 2
}
|
#### File: partest/fixtures/test_fixtures.py
```python
import os
import shutil
import tempfile
from pytest import fixture
from jeroenbos.partest.utils import append_to_file
# all these fixtures are made available to scope tests/** by the import statement in tests/conftest.py
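# A hypothetical sketch of that conftest.py (the module path is assumed from the
# imports above; adjust to the real package layout):
#
#     # tests/conftest.py
#     from jeroenbos.partest.fixtures.test_fixtures import *  # noqa: F401,F403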
@fixture
def temp_test_file():
path = tempfile.mktemp("_test.py")
append_to_file(
path,
"""# flake8: noqa
# type: ignore
import os
os.environ["METATESTING"] = "true"
import pytest
import unittest
from unittest import TestCase
""",
)
yield path
os.remove(path)
@fixture
def successful_test_file(temp_test_file: str) -> str:
return append_successful_test_file(temp_test_file)
def append_successful_test_file(temp_test_file: str) -> str:
append_to_file(
temp_test_file,
"""
def test_that_succeeds():
pass
""",
)
return temp_test_file
@fixture
def failing_test_file(temp_test_file: str) -> str:
append_to_file(
temp_test_file,
"""
def test_that_fails():
raise ValueError("Intended to fail")
""",
)
return temp_test_file
@fixture
def skipped_test_file(temp_test_file: str) -> str:
append_to_file(
temp_test_file,
"""
@pytest.mark.skip("Intended to be skipped")
def test_that_is_skipped():
raise ValueError("Intended to be skipped")
""",
)
return temp_test_file
@fixture
def skipped_test_with_failing_teardown_file(temp_test_file: str) -> str:
append_to_file(
temp_test_file,
"""
class TestSkippedTestWithFailingTearDown(TestCase):
@pytest.mark.skip("Intended to be skipped")
def test_that_is_skipped():
raise ValueError("Intended to be skipped")
def tearDown(self):
raise ValueError("Teardown intended to fail")
""",
)
return temp_test_file
@fixture
def test_with_failing_setup_file(temp_test_file: str) -> str:
append_to_file(
temp_test_file,
"""
class TestWithFailingTearDown(TestCase):
def test_after_failed_setup(self):
raise ValueError("After setup intended to fail")
def setUp(self):
raise ValueError("Setup intended to fail")
""",
)
return temp_test_file
@fixture
def test_with_failing_teardown_file(temp_test_file: str) -> str:
append_to_file(
temp_test_file,
"""
class TestWithFailingTearDown(TestCase):
def test_before_failing_teardown(self):
pass
def tearDown(self):
raise ValueError("Teardown intended to fail")
""",
)
return temp_test_file
@fixture
def test_with_failing_setup_and_teardown_file(test_with_failing_setup_file: str) -> str:
append_to_file(
test_with_failing_setup_file,
"""
def tearDown(self):
raise ValueError("Teardown intended to fail")
""",
)
return test_with_failing_setup_file
@fixture
def test_that_fails_and_teardown_file(temp_test_file: str) -> str:
append_to_file(
temp_test_file,
"""
class TestThatFailsWithFailingTearDown(TestCase):
def test_before_failing_teardown(self):
raise ValueError("Test intended to fail")
def tearDown(self):
raise ValueError("Teardown intended to fail")
""",
)
return temp_test_file
@fixture
def skipped_test_with_failing_class_teardown_file(temp_test_file: str) -> str:
append_to_file(
temp_test_file,
"""
class TestSkippedTestWithFailingTearDown(TestCase):
@pytest.mark.skip("Intended to be skipped")
def test_that_is_skipped():
raise ValueError("Intended to be skipped")
@classmethod
def tearDownClass(cls):
raise ValueError("Class teardown intended to fail")
""",
)
return temp_test_file
@fixture
def test_with_failing_class_setup_file(temp_test_file: str) -> str:
append_to_file(
temp_test_file,
"""
class TestWithFailingTearDown(TestCase):
def test_after_failed_setup(self):
raise ValueError("After setup intended to fail")
@classmethod
def setUpClass(cls):
raise ValueError("setUpClass intended to fail")
""",
)
return temp_test_file
@fixture
def test_with_failing_class_teardown_file(temp_test_file: str) -> str:
append_to_file(
temp_test_file,
"""
class TestWithFailingTearDown(TestCase):
def test_before_failing_teardown(self):
pass
@classmethod
def tearDownClass(cls):
raise ValueError("tearDownClass intended to fail")
""",
)
return temp_test_file
@fixture
def test_with_failing_class_setup_and_class_teardown_file(test_with_failing_class_setup_file: str) -> str:
append_to_file(
test_with_failing_class_setup_file,
"""
@classmethod
def tearDownClass(cls):
raise ValueError("Class teardown intended to fail")
""",
)
return test_with_failing_class_setup_file
@fixture
def test_that_fails_and_class_teardown_file(temp_test_file: str) -> str:
append_to_file(
temp_test_file,
"""
class TestThatFailsWithFailingTearDown(TestCase):
def test_before_failing_teardown(self):
raise ValueError("Test intended to fail")
@classmethod
def tearDownClass(cls):
raise ValueError("Class teardown intended to fail")
""",
)
return temp_test_file
@fixture
def temp_test_directory():
dir = tempfile.mkdtemp()
append_successful_test_file(os.path.join(dir, "test_file1.py"))
append_successful_test_file(os.path.join(dir, "nested", "test_file2.py"))
yield dir
try:
shutil.rmtree(dir)
except PermissionError:
pass
```
|
{
"source": "jeroenbrons/kubepi-amd64",
"score": 2
}
|
#### File: kubepi-amd64/commands/cmd_platform.py
```python
from kubepi.cli import pass_environment, logger
from kubepi.helpers.git import get_submodules, get_repo
import click
import click_spinner
import git
import os
import subprocess as s
# App group of commands
@click.group('platform', short_help='Platform commands')
@click.pass_context
@pass_environment
def cli(ctx, kube_context):
"""Platform commands to help with handling the codebase and repo"""
pass
@cli.command('init', short_help='Initialize platform components')
@click.argument('submodules',
required=True,
default='all')
@click.argument('repopath',
required=True,
type=click.Path(exists=True),
default=os.getcwd())
@click.pass_context
@pass_environment
def init(ctx, kube_context, submodules, repopath):
"""Init the platform by doing submodule init and checkout
all submodules on master"""
# Get the repo from arguments defaults to cwd
repo = get_repo(repopath)
submodules = get_submodules(repo, submodules)
with click_spinner.spinner():
repo.submodule_update()
logger.info('Platform initialized.')
@cli.command('info', short_help='Get info on platform')
@click.pass_context
@pass_environment
def info(ctx, kube_context):
"""Get info on accessing the platform"""
kube_context = ctx.kube_context
try:
k1s_host = s.run(['kubectl',
'--context',
'k3d-' + kube_context,
'-n',
'k1s',
'get',
'ingressroute',
'ui',
'-o',
'jsonpath={.spec.routes[0].match}'],
capture_output=True, check=True)
k1s_host = k1s_host.stdout.decode('utf-8')
k1s_host = k1s_host.split('`')
k1s_url = k1s_host[1]
logger.info('K1S can be accessed through the URL:')
logger.info('https://' + k1s_url + '/')
except s.CalledProcessError as error:
logger.debug(error.stderr.decode('utf-8'))
raise click.Abort()
@cli.command('token', short_help='Get the platform token')
@click.pass_context
@pass_environment
def token(ctx, kube_context):
"""Get the platform token required by Kubernetes Dashboard"""
kube_context = ctx.kube_context
try:
proc1 = s.Popen(['kubectl',
'--context',
'k3d-' + kube_context,
'-n',
'monitoring',
'describe',
'secret',
'k1s-admin'],
stdout=s.PIPE)
proc2 = s.Popen(['grep', 'token:'],
stdin=proc1.stdout, stdout=s.PIPE, universal_newlines=True)
proc1.stdout.close()
out = proc2.communicate()[0]
logger.info('The platform token is:\n')
logger.info(out)
except s.CalledProcessError as error:
logger.debug(error.stderr.decode('utf-8'))
raise click.Abort()
@cli.command('version', short_help='Get all versions of components')
@click.argument('submodules',
required=True,
default='all')
@click.argument('repopath',
required=True,
type=click.Path(exists=True),
default=os.getcwd())
@click.pass_context
@pass_environment
def version(ctx, kube_context, submodules, repopath):
"""Check versions of services in git submodules
You can provide a comma separated list of submodules
or you can use 'all' for all submodules"""
# Get the repo from arguments defaults to cwd
repo = get_repo(repopath)
submodules = get_submodules(repo, submodules)
# Do something with the submodules
all_sm_details = []
with click_spinner.spinner():
for submodule in submodules:
logger.debug('Switched to submodule: ' + submodule)
sm_details = {}
sm_details['repo'] = submodule
# Are we on an active branch? on a tag? if not then get sha?
try:
smrepo = git.Repo(submodule)
sm_details['present'] = True
except git.InvalidGitRepositoryError as error:
logger.warning(submodule + ': not present')
sm_details['present'] = False
all_sm_details.append(sm_details)
continue
# Get branch
try:
branch = smrepo.active_branch.name
sm_details['branch'] = branch
# Check if remotes are ahead or behind
origin = smrepo.remotes.origin
origin.fetch()
commits_behind = smrepo.iter_commits(branch +
'..origin/' + branch)
commits_ahead = smrepo.iter_commits('origin/' + branch +
'..' + branch)
sm_details['commits_ahead'] = sum(1 for c in commits_ahead)
sm_details['commits_behind'] = sum(1 for c in commits_behind)
except TypeError as error:
sm_details['branch'] = ''
logger.debug(error)
# Check if we point to any tags
points_at_tag = smrepo.git.tag('--points-at', 'HEAD')
sm_details['tag'] = points_at_tag
# Get sha of HEAD
sha = smrepo.head.commit.hexsha
sm_details['sha'] = sha
# Add submodule details to the list
all_sm_details.append(sm_details)
logger.debug('Received following details about the platform submodules:')
logger.debug(all_sm_details)
for sm_details in all_sm_details:
logger.info(sm_details['repo'] + ':')
logger.info('Branch: ' + sm_details['branch'])
logger.info('SHA: ' + sm_details['sha'])
if sm_details['tag']:
logger.info('Tag: ' + sm_details['tag'])
if sm_details['commits_ahead'] > 0:
logger.info('Ahead by: ' +
str(sm_details['commits_ahead']) + ' commits')
if sm_details['commits_behind'] > 0:
logger.info('Behind by: ' +
str(sm_details['commits_behind']) + ' commits')
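# Hypothetical CLI usage sketch (the 'kubepi' executable name is assumed from the
# package name; submodule names and paths are made up):
#
#     kubepi platform init all
#     kubepi platform version all
#     kubepi platform version ui,api /path/to/platform-repo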
```
#### File: kubepi-amd64/helpers/git.py
```python
from kubepi.cli import logger
import click
import git
def get_repo(repopath):
try:
return git.Repo(repopath, odbt=git.GitDB)
except git.InvalidGitRepositoryError:
logger.critical('The repo path ' + repopath + ' is not a git repo')
raise click.Abort()
def get_submodules(repo, submodules):
# Based on provided submodules through arguments set the repo objects
# that we want to work with
if submodules == 'all':
submodules = repo.submodules
submodule_list = []
for submodule in submodules:
submodule_list.append(submodule.name)
submodules = submodule_list
else:
submodules = submodules.split(',')
submodule_list = []
for submodule in submodules:
submodule_list.append('platform/' + submodule)
submodules = submodule_list
logger.debug('The provided submodules are:')
logger.debug(submodules)
    return submodules
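# Usage sketch (submodule names and path are made up): bare names are prefixed with
# 'platform/', matching how the submodules are registered in the repo.
#
#     repo = get_repo('/path/to/platform-repo')
#     get_submodules(repo, 'all')      # every registered submodule name
#     get_submodules(repo, 'ui,api')   # -> ['platform/ui', 'platform/api']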
```
|
{
"source": "jeroenbrouwer/django-tenant-schemas",
"score": 3
}
|
#### File: django-tenant-schemas/tenant_schemas/rename.py
```python
from django.core.exceptions import ValidationError
from django.db import connection
from tenant_schemas.postgresql_backend.base import _is_valid_schema_name
from tenant_schemas.utils import schema_exists
def rename_schema(*, schema_name, new_schema_name):
"""
    Rename a schema to a new name, after checking that the new name is valid and not already in use.
"""
cursor = connection.cursor()
if schema_exists(new_schema_name):
raise ValidationError("New schema name already exists")
if not _is_valid_schema_name(new_schema_name):
raise ValidationError("Invalid string used for the schema name.")
sql = 'ALTER SCHEMA {0} RENAME TO {1}'.format(schema_name, new_schema_name)
cursor.execute(sql)
cursor.close()
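# Minimal usage sketch (schema names are made up); raises ValidationError if the
# new name is invalid or already exists:
#
#     rename_schema(schema_name='tenant_old', new_schema_name='tenant_new')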
```
|
{
"source": "JeroenDeDauw/WikidataIntegrator",
"score": 2
}
|
#### File: wikidataintegrator/ref_handlers/strict_overwrite.py
```python
from datetime import datetime
import copy
####
# Example custom ref handler
# Always replaces all old refs with new refs
####
def strict_overwrite(olditem, newitem):
# modifies olditem in place!!!
olditem.references = newitem.references
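# Usage sketch: a ref handler is called with an existing item and a newly built
# item; this one replaces the old item's references wholesale (in place).
#
#     strict_overwrite(old_item, new_item)
#     # old_item.references is now new_item.references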
```
|
{
"source": "jeroen-dhollander/python-paginator",
"score": 3
}
|
#### File: python-paginator/examples/more_with_rainbow_page_plugin.py
```python
from more_or_less import MorePlugin, Page, PageOfHeight, RepeatableMixin
import more_or_less
import random
import sys
# See https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
# http://www.isthe.com/chongo/tech/comp/ansi_escapes.html
# Make text bold, faint, normal, underlined, blinking, crossed-out, ...
_MODIFIERS = list(range(0, 10))
_FOREGROUND_COLORS = list(range(30, 38)) + list(range(90, 98))
_BACKGROUND_COLORS = list(range(40, 48)) + list(range(100, 108))
def main():
more_or_less.add_plugin(RainbowPlugin)
more_or_less.paginate(input=sys.stdin)
class RainbowPlugin(MorePlugin):
def get_keys(self):
# We trigger our plugin on 'r' and 'R'
return ['r', 'R']
def build_page(self, page_builder, key_pressed, arguments):
# Return our output page.
# For the page height, we either use the value provided on the command line
# (if the user typed 10r),
# or we default to the screen height
height = arguments.get('count', page_builder.get_page_height())
return RainbowPage(output=page_builder.get_output(), height=height)
def get_help(self):
# Help is returned as an iterator over ('key', 'help') tupples
yield ('r or R', 'Rainbowify the next k lines of text [current screen height]')
class RainbowPage(Page, RepeatableMixin):
'''
Rainbowifies every line.
By inheriting from 'RepeatableMixin' we support repeating the command by pressing '.'
'''
def __init__(self, height, output):
self._page = PageOfHeight(height, output=output)
def is_full(self):
return self._page.is_full()
def add_line(self, line):
return self._page.add_line(rainbowify(line))
def flush(self):
return self._page.flush()
def repeat(self):
return RainbowPage(self._page.height, self._page.output)
def rainbowify(line):
modifier = random.choice(_MODIFIERS)
foreground = random.choice(_FOREGROUND_COLORS)
background = random.choice(_BACKGROUND_COLORS)
return f'\x1b[{modifier};{foreground};{background}m{line}\x1b[0m'
if __name__ == "__main__":
main()
```
#### File: python-paginator/more_or_less/count_plugin.py
```python
from .more_plugin import MorePlugin
class CountPlugin(MorePlugin):
'''
Invoked when the user types any number.
Adds a 'count' argument to the next action.
'''
def __init__(self):
self._digits = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
def get_keys(self):
return self._digits
def build_page(self, page_builder, key_pressed, arguments):
arguments['count'] = self._get_count(page_builder, key_pressed)
return page_builder.build_next_page(arguments=arguments)
def get_help(self):
return []
def _get_count(self, page_builder, first_key):
def iter_digits():
# Read characters as long as the user enters digits
key_pressed = first_key
while key_pressed in self._digits:
yield key_pressed
key_pressed = input.get_character(prompt_message)
input.put_back(key_pressed)
input = page_builder.get_input()
prompt_message = page_builder.get_prompt_message()
return int(''.join(iter_digits()))
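# Illustrative flow: if the user types '1', '0' and then a space, _get_count
# returns 10, the space is pushed back onto the input, and the next plugin
# (triggered by the space) builds its page with arguments={'count': 10}.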
```
#### File: python-paginator/more_or_less/fixed_size_screen.py
```python
import sys
from more_or_less.screen_dimensions import ScreenDimensions
_HUGE = sys.maxsize
class FixedSizeScreen(ScreenDimensions):
def __init__(self, height=_HUGE, width=_HUGE):
self._height = height
self._width = width
def get_height(self):
return self._height
def get_width(self):
return self._width
```
#### File: python-paginator/more_or_less/more_plugin.py
```python
from abc import ABC, abstractmethod
class MorePlugin(ABC):
'''
A plugin that represents an extra action the user can take on the 'more' prompt.
'''
@abstractmethod
def get_keys(self):
''' Returns a list of the keys the user has to enter to trigger this action. '''
pass
@abstractmethod
def build_page(self, page_builder, key_pressed, arguments):
'''
Called when the user pressed one of the keys to trigger this action.
Arguments:
----------
page_builder:
The MorePageBuilder instance.
key_pressed:
The key the user pressed to trigger this action.
arguments:
A dictionary of arguments the user entered on this line before triggering
this action.
By default, the only value that can be in there is 'count',
which will be set if the user entered a number before your action.
For example, if the user entered '10 ' then the '<space>' action is triggered
with argument {'count': 10}.
'''
pass
def wrap_page(self, page):
'''
Called when a new page is created.
Gives the plugin to return a wrapper page that can be used to modify/register
_every_ line, including the ones that are suppressed by other plugins.
Example usage is counting all the outputted lines.
Must return a 'Page'. Implementing this method is optional.
'''
return page
@abstractmethod
def get_help(self):
'''
Returns an iterator over 'command', 'help-text' tuples that describe how to use
this plugin.
Example:
yield (' ', 'Display next line of text')
'''
pass
```
#### File: python-paginator/more_or_less/one_line_plugin.py
```python
from .more_plugin import MorePlugin
from .page_of_height import PageOfHeight
class OneLinePlugin(MorePlugin):
'''
Displays one more output line.
Invoked when the user types '<enter>'.
'''
def __init__(self):
self._page_height = 1
def get_keys(self):
return ['\r', '\n']
def build_page(self, page_builder, key_pressed, arguments):
self._update_page_height(arguments)
return PageOfHeight(height=self._page_height, output=page_builder.get_output())
def get_help(self):
yield ('<return>', 'Display next k lines of text [{}]*'.format(self._page_height))
def _update_page_height(self, arguments):
self._page_height = arguments.get('count', self._page_height)
```
#### File: python-paginator/more_or_less/output.py
```python
from abc import ABC, abstractmethod
class Output(ABC):
'''
Example API of what is expected from the 'output' object.
This does not mean it must inherit from this.
Note that any 'file' object matches this API,
so files can natively be used as output.
'''
@abstractmethod
def write(self, text):
pass
@abstractmethod
def flush(self):
pass
```
#### File: python-paginator/more_or_less/paginator.py
```python
from .more_page_builder import MorePageBuilder
from .page_builder import StopOutput
import queue
import threading
# Signal to send to the input queue when there is no more input
END_OF_INPUT = None
# Return code if output was interrupted by the user (e.g. the user pressed ctrl+c)
OUTPUT_STOPPED = 'OUTPUT_STOPPED'
def paginate(
input,
output=None,
prompt=None,
screen_dimensions=None,
plugins=None,
page_builder=None,
asynchronous=False):
'''
Paginates the input, similar to how 'more' works in bash.
Reads from input until the output window is full.
Then prompts the user for an action before reading more input.
Pseudo-logic:
-------------
page = page_builder.build_first_page()
for line in <input-lines>:
if page.is_full():
page.flush()
page = page_builder.build_next_page()
page.add_line(line)
Arguments:
----------
input: [type iterable or Queue]
The input text that should be paginated.
This must either be an iterable over text (e.g. a list or a file), or an instance of queue.Queue.
It is not required that each returned string is a complete line.
The paginator will combine incomplete lines until a '\n' is encountered.
If it is a queue.Queue, you must pass 'END_OF_INPUT' into the queue when no more input is expected.
This will flush the final incomplete line (if any) to the output.
Note that you can NOT use queue.join() to detect all input has been processed
(as that just raises issues if the user decides to abort the output halfway through).
Instead, if you use 'asynchronous=True' you can join the returned context.
output: [type Output]
If not specified we print output to stdout
prompt: [type Input]
Used when prompting the user for actions.
Defaults to reading from stdin.
screen_dimensions: [type ScreenDimensions]
Used to know the height of the output window
(which is used to decide how many lines to print before we consider a page 'full').
Defaults to using the dimensions of the terminal window.
plugins: [type list of MorePlugin]
The plugins to load. These plugins decide what actions are available on the 'more' prompt.
If not specified will fetch all plugins from more_plugins.py
asynchronous: [type bool]
If true the 'paginate' call will return instantly and run asynchronously.
In this case a context is returned on which you can call 'context.join([timeout])'
to block until all lines are sent to the output.
page_builder: [type PageBuilder]
The object that will create the output pages whenever a page is full.
Must be an instance of 'PageBuilder'.
If specified we ignore the values of output, prompt, screen_dimensions and plugins.
Returns:
--------
A joinable 'context' if asynchronous is True
OUTPUT_STOPPED if the user stopped the output (for example using ctrl+c)
'''
page_builder = page_builder or MorePageBuilder(
input=prompt,
output=output,
screen_dimensions=screen_dimensions,
plugins=plugins)
if asynchronous:
thread = threading.Thread(
target=paginate,
kwargs={
'input': input,
'page_builder': page_builder,
'asynchronous': False,
},
)
thread.start()
return thread
paginator = Paginator(page_builder)
if isinstance(input, queue.Queue):
return paginator.paginate_from_queue(input)
else:
return paginator.paginate(input)
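# Minimal usage sketch (the input lines are made up): paginate a plain list of
# lines to stdout, prompting on stdin, and detect a user abort.
#
#     lines = ['line {}\n'.format(i) for i in range(100)]
#     if paginate(input=lines) == OUTPUT_STOPPED:
#         print('output aborted by the user')
#
# For streaming input, pass a queue.Queue instead and push END_OF_INPUT when no
# more text is expected (optionally with asynchronous=True to get a joinable
# context back).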
class Paginator(object):
'''
Paginates given input text, similar to how 'more' works in bash.
See help of 'paginate' for a more detailed description of the behavior.
There are 3 ways to send input text:
- pass an iterable to self.paginate.
- pass a queue to self.paginate_from_queue.
- call 'add_text' repeatedly until all text has been sent in, then call 'flush_incomplete_line'.
Each of these methods returns 'OUTPUT_STOPPED' if the user stopped the output (for example using ctrl+c)
'''
def __init__(self, page_builder):
self._page_builder = page_builder
self._lines = _LineCollector()
self._page = self._page_builder.build_first_page()
def paginate(self, iterable):
'''
Iterates over the iterable, and paginates all the text it returns
'''
try:
for text in iterable:
self._try_to_add_text(text)
self.flush_incomplete_line()
except StopOutput:
return OUTPUT_STOPPED
def paginate_from_queue(self, input_queue):
'''
Iterates over the queue, and paginates all the text it returns.
Stops paginating when END_OF_INPUT is encountered on the queue.
'''
return self.paginate(QueueIterator(input_queue))
def add_text(self, input_text):
'''
Splits the input_text into lines, and paginates them.
Can be called multiple times.
When you're done you must call 'flush_incomplete_line'
to ensure the last incomplete input line is sent to the output.
'''
try:
self._try_to_add_text(input_text)
except StopOutput:
return OUTPUT_STOPPED
def _try_to_add_text(self, input_text):
self._lines.add(input_text)
for line in self._lines.pop_complete_lines():
self._paginate_and_print_text(line)
def flush_incomplete_line(self):
try:
self._try_to_flush_incomplete_line()
except StopOutput:
return OUTPUT_STOPPED
def _try_to_flush_incomplete_line(self):
if len(self._lines.incomplete_line):
self._paginate_and_print_text(self._lines.pop_incomplete_line())
self._page.flush()
def _paginate_and_print_text(self, text):
if self._page.is_full():
self._start_new_page()
self._output_text(text)
def _start_new_page(self):
self._page.flush()
self._page = self._page_builder.build_next_page()
def _output_text(self, text):
self._page.add_line(text)
class _LineCollector(object):
'''
Collects the input text and allows us to walk over the complete lines only.
example:
self.add('first ')
self.add('line \nsecond line\n')
self.add('incomplete final line')
self.pop_complete_lines() <-- returns ['first line', 'second line']
self.pop_incomplete_line() <-- returns 'incomplete final line'
'''
def __init__(self):
self._complete_lines = []
self.incomplete_line = ''
def add(self, text):
assert isinstance(text, str), 'expected str got {}'.format(text.__class__)
unprocessed_text = self.incomplete_line + text
complete_lines, incomplete_line = self._split_lines(unprocessed_text)
self._complete_lines += complete_lines
self.incomplete_line = incomplete_line
def pop_complete_lines(self):
try:
return self._complete_lines
finally:
self._complete_lines = []
def pop_incomplete_line(self):
try:
return self.incomplete_line
finally:
self.incomplete_line = ''
def _split_lines(self, text):
lines = text.splitlines(True)
if self._has_incomplete_line(lines):
complete_lines = lines[:-1]
incomplete_line = lines[-1]
else:
complete_lines = lines
incomplete_line = ''
return (complete_lines, incomplete_line)
def _has_incomplete_line(self, lines):
return len(lines) and not lines[-1].endswith('\n')
def _make_callable(value):
if not callable(value):
return lambda: value
else:
return value
class QueueIterator(object):
'''
Iterates over a queue, until END_OF_INPUT is encountered
'''
def __init__(self, queue):
self._queue = queue
def __iter__(self):
return self
def __next__(self):
text = self._queue.get()
if text is END_OF_INPUT:
raise StopIteration
return text
```
#### File: python-paginator/more_or_less/search_plugin.py
```python
from .more_plugin import MorePlugin
from .page import Page
from .page_of_height import PageOfHeight
from .repeatable_mixin import RepeatableMixin
import re
_NO_PREVIOUS_REGULAR_EXPRESSION = '--No previous regular expression--'
_SKIPPING_MESSAGE = '...skipping\n'
class SearchPlugin(MorePlugin):
'''
Skips all output until a certain search pattern is found.
Invoked when the user types '/'.
The search can be repeated by pressing 'n'
'''
def __init__(self):
self._pattern = None
self._match_count = None
def get_keys(self):
return ['/', 'n']
def build_page(self, page_builder, key_pressed, arguments):
self._match_count = arguments.get('count', 1)
if key_pressed == '/':
return self._do_new_search(page_builder)
elif key_pressed == 'n':
return self._repeat_last_search(page_builder)
else:
assert False, 'Unexpected input key'
def get_help(self):
yield ('/<regular expression>', 'Search for kth occurrence of the regular expression [1]')
yield ('n', 'Search for kth occurrence of the last regular expression [1]')
def _do_new_search(self, page_builder):
self._update_pattern(page_builder.get_input())
return self._create_search_page(page_builder)
def _repeat_last_search(self, page_builder):
if self._pattern is None:
return page_builder.build_next_page(message=_NO_PREVIOUS_REGULAR_EXPRESSION)
else:
return self._create_search_page(page_builder)
def _create_search_page(self, page_builder):
page_builder.get_output().write(_SKIPPING_MESSAGE)
return SearchPage(
pattern=self._pattern,
next_page=self._create_full_page(page_builder),
match_count=self._match_count,
)
def _create_full_page(self, page_builder):
return PageOfHeight(
height=page_builder.get_page_height(),
output=page_builder.get_output())
def _update_pattern(self, input):
self._pattern = input.prompt('/')
class SearchPage(Page, RepeatableMixin):
'''
A page that suppresses all output until a given search pattern is found.
After that it displays the passed in page
'''
def __init__(self, pattern, next_page, match_count):
self.pattern = pattern
self.next_page = next_page
self._matcher = re.compile(pattern)
self._actual_match_count = 0
self.required_match_count = match_count
def is_full(self):
if self.has_match:
return self.next_page.is_full()
return False
def add_line(self, line):
self._match(line)
if self.has_match:
self.next_page.add_line(line)
def _match(self, line):
if self._matcher.search(line):
self._actual_match_count = self._actual_match_count + 1
def flush(self):
if self.has_match:
self.next_page.flush()
def repeat(self):
return SearchPage(self.pattern, self.next_page.repeat(), self.required_match_count)
@property
def has_match(self):
return self._actual_match_count >= self.required_match_count
```
#### File: python-paginator/more_or_less/wrapped_page.py
```python
from .page import Page
from abc import ABC, abstractmethod
class WrappedPage(Page, ABC):
'''
Basic class that can be derived from if you need to
create a Page that
- records all printed lines
- can change the printed lines before forwarding.
'''
def __init__(self, wrapped_page):
self.wrapped_page = wrapped_page
def is_full(self):
return self.wrapped_page.is_full()
def add_line(self, line):
new_line = self.on_add_line(line)
return self.wrapped_page.add_line(new_line)
def flush(self):
return self.wrapped_page.flush()
@abstractmethod
def on_add_line(self, line):
''' Called with every line. Returns the modified version of the line '''
pass
def __getattr__(self, name):
return getattr(self.wrapped_page, name)
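# Hypothetical subclass sketch (class name and prefix are made up): forward every
# line to the wrapped page with a prefix added.
#
#     class PrefixedPage(WrappedPage):
#         def on_add_line(self, line):
#             return '>>> ' + line
#
#     page = PrefixedPage(wrapped_page=some_inner_page)   # some_inner_page: any Page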
```
#### File: python-paginator/tests/test_line_count_plugin.py
```python
from more_or_less.input import Input
from more_or_less.output import Output
from tests.test_more_page_builder import TestUtil
from unittest.mock import Mock, call
class TestLineCountPlugin(TestUtil):
def setUp(self):
self.input = Mock(Input)
self.output = Mock(Output)
self.builder = self.get_more_page_builder(input=self.input, output=self.output)
def print_n_lines(self, n):
page = self.builder.build_first_page()
for i in range(0, n):
page.add_line(f'line {i}\n')
def test_prints_line_number_when_user_types_equal(self):
self.print_n_lines(10)
self.input.get_character.side_effect = ['=', ' ']
self.builder.build_next_page()
self.input.get_character.assert_has_calls([
call('--More--'),
call('--10--'),
])
def test_returns_next_page_after_printing_line_number(self):
self.print_n_lines(10)
self.input.get_character.side_effect = ['=', ' ']
page = self.builder.build_next_page()
self.assertIsFullscreenPage(page)
def test_prints_line_numbers_after_pressing_l(self):
first_page = self.builder.build_first_page()
first_page.add_line('before enabling line-numbers\n')
self.input.get_character.side_effect = ['l', ' ']
page = self.builder.build_next_page()
page.add_line('after enabling line-numbers\n')
self.output.assert_has_calls([
call.write('before enabling line-numbers\n'),
call.write('2: after enabling line-numbers\n'),
])
def test_stops_printing_line_numbers_after_pressing_l_again(self):
first_page = self.builder.build_first_page()
first_page.add_line('before enabling line-numbers\n')
self.input.get_character.side_effect = ['l', ' ']
page = self.builder.build_next_page()
page.add_line('after enabling line-numbers\n')
self.input.get_character.side_effect = ['l', ' ']
page = self.builder.build_next_page()
page.add_line('after disabling line-numbers\n')
self.output.assert_has_calls([
call.write('before enabling line-numbers\n'),
call.write('2: after enabling line-numbers\n'),
call.write('after disabling line-numbers\n'),
])
def test_prints_status_in_prompt_when_enabling_or_disabling_line_numbers(self):
self.input.get_character.side_effect = ['l', 'l', ' ']
self.builder.build_next_page()
self.input.assert_has_calls([
call.get_character('--Line numbers are now enabled--'),
call.get_character('--Line numbers are now disabled--'),
])
```
#### File: python-paginator/tests/test_more_page_builder.py
```python
from more_or_less import PageOfHeight
from more_or_less.fixed_size_screen import FixedSizeScreen
from more_or_less.input import Input
from more_or_less.more_page_builder import MorePageBuilder
from more_or_less.output import Output
from more_or_less.page_builder import StopOutput
from more_or_less.wrapped_page import WrappedPage
from unittest.mock import Mock
import unittest
class TestUtil(unittest.TestCase):
def assertIsPageOfType(self, page, page_type):
''' assertIsInstance, but will first strip page-wrappers '''
page = _skip_page_wrappers(page)
self.assertIsInstance(page, page_type)
def assertIsPageOfHeight(self, page, height):
self.assertIsPageOfType(page, PageOfHeight)
self.assertEqual(height, page.height)
def assertIsFullscreenPage(self, page, screen_height=1000):
self.assertIsPageOfHeight(page, _page_height_for_screen(screen_height))
def get_more_page_builder(self, output=None, input=None, plugins=None, screen_height=1000):
return MorePageBuilder(
input=input or Mock(Input),
output=output or Mock(Output),
screen_dimensions=FixedSizeScreen(height=screen_height),
plugins=plugins,
)
class TestMorePageBuilder(TestUtil):
def test_build_first_page_returns_page_of_screen_height_minus_one(self):
screen_height = 10
builder = self.get_more_page_builder(screen_height=screen_height)
page = builder.build_first_page()
self.assertIsPageOfHeight(page, screen_height - 1)
def test_build_next_page_prompts_user_for_action(self):
input = Mock(Input)
input.get_character.return_value = ' '
builder = self.get_more_page_builder(input=input)
builder.build_next_page()
input.get_character.assert_called_once_with('--More--')
def test_returns_full_screen_page_if_user_presses_space(self):
screen_height = 10
input = Mock(Input)
builder = self.get_more_page_builder(input=input, screen_height=10)
input.get_character.return_value = ' '
page = builder.build_next_page()
self.assertIsFullscreenPage(page, screen_height)
def test_returns_one_line_page_if_user_presses_enter(self):
input = Mock(Input)
builder = self.get_more_page_builder(input=input)
input.get_character.return_value = '\r'
page = builder.build_next_page()
self.assertIsPageOfHeight(page, 1)
def test_enter_works_both_on_newline_and_carriage_return(self):
input = Mock(Input)
builder = self.get_more_page_builder(input=input)
input.get_character.return_value = '\n'
page = builder.build_next_page()
self.assertIsPageOfHeight(page, 1)
def test_stops_output_if_user_presses_q(self):
input = Mock(Input)
builder = self.get_more_page_builder(input=input)
input.get_character.return_value = 'q'
with self.assertRaises(StopOutput):
builder.build_next_page()
def test_stops_output_if_user_presses_Q(self):
input = Mock(Input)
builder = self.get_more_page_builder(input=input)
input.get_character.return_value = 'Q'
with self.assertRaises(StopOutput):
builder.build_next_page()
def test_stops_output_on_ctrl_c(self):
input = Mock(Input)
builder = self.get_more_page_builder(input=input)
input.get_character.side_effect = KeyboardInterrupt
with self.assertRaises(StopOutput):
builder.build_next_page()
def test_ignores_unexpected_user_input(self):
input = Mock(Input)
builder = self.get_more_page_builder(input=input)
input.get_character.side_effect = ['a', 'b', 'c', '\r']
builder.build_next_page()
self.assertEqual(4, input.get_character.call_count)
def test_user_can_enter_count_before_enter(self):
input = Mock(Input)
builder = self.get_more_page_builder(input=input)
input.get_character.side_effect = ['5', '\n']
page = builder.build_next_page()
self.assertIsPageOfHeight(page, 5)
def test_count_becomes_the_new_default_for_enter(self):
input = Mock(Input)
builder = self.get_more_page_builder(input=input)
input.get_character.side_effect = ['5', '\n']
builder.build_next_page()
input.get_character.side_effect = ['\n']
second_page = builder.build_next_page()
self.assertIsPageOfHeight(second_page, 5)
def test_can_specify_count_bigger_than_10(self):
input = Mock(Input)
builder = self.get_more_page_builder(input=input)
input.get_character.side_effect = ['5', '0', '0', '\n']
page = builder.build_next_page()
self.assertIsPageOfHeight(page, 500)
def test_user_can_enter_count_before_space(self):
input = Mock(Input)
builder = self.get_more_page_builder(input=input)
input.get_character.side_effect = ['5', ' ']
page = builder.build_next_page()
self.assertIsPageOfHeight(page, 5)
def test_count_does_not_become_the_new_default_for_space(self):
input = Mock(Input)
screen_height = 666
builder = self.get_more_page_builder(input=input, screen_height=screen_height)
input.get_character.side_effect = ['5', ' ']
builder.build_next_page()
input.get_character.side_effect = [' ']
second_page = builder.build_next_page()
self.assertIsFullscreenPage(second_page, screen_height)
def _page_height_for_screen(screen_height):
height_reserved_for_more_prompt = 1
return screen_height - height_reserved_for_more_prompt
def _skip_page_wrappers(page):
while isinstance(page, WrappedPage):
page = page.wrapped_page
return page
```
#### File: python-paginator/tests/test_repeat_plugin.py
```python
from more_or_less import more_plugins
from more_or_less.input import Input
from more_or_less.more_plugin import MorePlugin
from more_or_less.output import Output
from more_or_less.page import Page
from more_or_less.search_plugin import SearchPage
from tests.test_more_page_builder import TestUtil
from unittest.mock import Mock, call
_UNREPEATABLE_PAGE_KEY = 'U'
class TestRepeatPlugin(TestUtil):
def setUp(self):
self.input = Mock(Input)
self.output = Mock(Output)
plugins = more_plugins.get() + [UnrepeatablePagePlugin()]
self.builder = self.get_more_page_builder(
input=self.input,
output=self.output,
plugins=plugins)
def fill_page(self, page):
while not page.is_full():
page.add_line('line \n')
def test_can_repeat_enter(self):
self.input.get_character.side_effect = ['5', '\n']
page = self.builder.build_next_page()
self.fill_page(page)
self.input.get_character.side_effect = ['.']
repeated_page = self.builder.build_next_page()
self.assertIsPageOfHeight(repeated_page, 5)
self.assertFalse(repeated_page.is_full())
def test_can_repeat_space(self):
self.input.get_character.side_effect = [' ']
page = self.builder.build_next_page()
self.fill_page(page)
self.input.get_character.side_effect = ['.']
repeated_page = self.builder.build_next_page()
self.assertIsPageOfHeight(repeated_page, page.height)
def test_can_repeat_search(self):
self.input.get_character.side_effect = ['5', '/']
self.input.prompt.return_value = 'the pattern'
self.builder.build_next_page()
self.input.get_character.side_effect = ['.']
repeated_page = self.builder.build_next_page()
self.assertIsPageOfType(repeated_page, SearchPage)
self.assertEqual('the pattern', repeated_page.pattern)
self.assertEqual(5, repeated_page.required_match_count)
def test_prints_warning_on_unrepeatable_command(self):
self.input.get_character.side_effect = [_UNREPEATABLE_PAGE_KEY]
self.builder.build_next_page()
self.input.get_character.side_effect = ['.', ' ', ' ']
self.builder.build_next_page()
self.input.assert_has_calls([
call.get_character('--More--'),
call.get_character('--Previous command can not be repeated--'),
])
class UnrepeatablePage(Page):
def is_full(self):
return False
def add_line(self, line):
pass
class UnrepeatablePagePlugin(MorePlugin):
'''
Plugin that returns a page of type 'UnrepeatablePage'
'''
def get_keys(self):
return [_UNREPEATABLE_PAGE_KEY]
def build_page(self, page_builder, key_pressed, arguments):
return UnrepeatablePage()
def get_help(self):
pass
```
#### File: python-paginator/tests/test_search_plugin.py
```python
from more_or_less.input import Input
from more_or_less.output import Output
from more_or_less.page import Page
from more_or_less.search_plugin import SearchPage
from tests.test_more_page_builder import TestUtil
from unittest.mock import Mock, call
import unittest
class TestSearchPlugin(TestUtil):
def assertIsSearchPageWithPattern(self, page, pattern):
self.assertIsPageOfType(page, SearchPage)
self.assertEqual(pattern, page.pattern)
def assertIsSearchPageWithMatchCount(self, page, match_count):
self.assertIsPageOfType(page, SearchPage)
self.assertEqual(match_count, page.required_match_count)
def test_creates_search_page_when_pressing_slash(self):
input = Mock(Input)
input.get_character.return_value = '/'
input.prompt.return_value = ''
builder = self.get_more_page_builder(input=input)
page = builder.build_next_page()
self.assertIsSearchPageWithPattern(page, pattern='')
def test_passes_search_pattern_to_search_page(self):
input = Mock(Input)
input.get_character.return_value = '/'
input.prompt.return_value = 'the-pattern'
builder = self.get_more_page_builder(input=input)
page = builder.build_next_page()
self.assertIsSearchPageWithPattern(page, pattern='the-pattern')
def test_n_repeats_previous_search(self):
input = Mock(Input)
input.get_character.return_value = '/'
input.prompt.side_effect = ['the-pattern']
builder = self.get_more_page_builder(input=input)
builder.build_next_page()
input.get_character.return_value = 'n'
second_page = builder.build_next_page()
self.assertIsSearchPageWithPattern(second_page, pattern='the-pattern')
def test_n_without_previous_search_prints_error_in_prompt(self):
input = Mock(Input)
input.get_character.side_effect = ['n', ' ']
builder = self.get_more_page_builder(input=input)
builder.build_next_page()
input.get_character.assert_has_calls([
call('--More--'),
call('--No previous regular expression--'),
])
def test_prints_skipping_text_to_output(self):
input = Mock(Input)
input.get_character.return_value = '/'
input.prompt.side_effect = ['the-pattern']
output = Mock(Output)
builder = self.get_more_page_builder(input=input, output=output)
builder.build_next_page()
output.write.assert_called_once_with('...skipping\n')
def test_passes_full_page_to_search_page(self):
screen_height = 100
input = Mock(Input)
input.get_character.return_value = '/'
input.prompt.side_effect = ['the-pattern']
builder = self.get_more_page_builder(input=input, screen_height=screen_height)
page = builder.build_next_page()
self.assertIsFullscreenPage(page.next_page, screen_height=screen_height)
def test_passes_count_1_to_search_page_by_default(self):
input = Mock(Input)
input.get_character.return_value = '/'
input.prompt.side_effect = ['the-pattern']
builder = self.get_more_page_builder(input=input)
page = builder.build_next_page()
self.assertIsSearchPageWithMatchCount(page, match_count=1)
def test_passes_count_to_search_page(self):
input = Mock(Input)
input.get_character.side_effect = ['5', '/']
input.prompt.side_effect = ['the-pattern']
builder = self.get_more_page_builder(input=input)
page = builder.build_next_page()
self.assertIsSearchPageWithMatchCount(page, match_count=5)
def test_n_defaults_to_match_count_1(self):
input = Mock(Input)
input.get_character.side_effect = ['5', '/']
input.prompt.side_effect = ['the-pattern']
builder = self.get_more_page_builder(input=input)
builder.build_next_page()
input.get_character.side_effect = ['n']
second_page = builder.build_next_page()
self.assertIsSearchPageWithMatchCount(second_page, match_count=1)
def test_n_accepts_a_count(self):
input = Mock(Input)
input.get_character.side_effect = ['/']
input.prompt.side_effect = ['the-pattern']
builder = self.get_more_page_builder(input=input)
builder.build_next_page()
input.get_character.side_effect = ['7', 'n']
second_page = builder.build_next_page()
self.assertIsSearchPageWithMatchCount(second_page, match_count=7)
class TestSearchPage(unittest.TestCase):
def setUp(self):
self.next_page = Mock(Page)
def create_search_page(self, pattern='', match_count=1):
return SearchPage(pattern=pattern, next_page=self.next_page, match_count=match_count)
def test_add_line_blackholed_if_it_doesnt_match(self):
page = self.create_search_page('the-pattern')
page.add_line('this does not match the pattern')
self.next_page.add_line.assert_not_called()
def test_add_line_forwarded_if_it_matches_the_pattern(self):
page = self.create_search_page('the.*pattern')
page.add_line('this matches the regex pattern')
self.next_page.add_line.assert_called_once_with('this matches the regex pattern')
def test_add_line_forwarded_if_previous_line_matched_the_pattern(self):
page = self.create_search_page('the.*pattern')
page.add_line('this matches the regex pattern')
page.add_line('next line')
self.next_page.add_line.assert_has_calls([
call('this matches the regex pattern'),
call('next line'),
])
def test_must_match_the_provided_number_of_times(self):
match_count = 5
page = self.create_search_page('the.*pattern', match_count=match_count - 1)
page.add_line('this matches the regex pattern the 1th time')
page.add_line('this matches the regex pattern the 2nd time')
page.add_line('this matches the regex pattern the 3rd time')
page.add_line('this matches the regex pattern the 4th time')
page.add_line('this matches the regex pattern the 5th time')
self.next_page.add_line.assert_has_calls([
call('this matches the regex pattern the 5th time'),
])
def test_is_full_false_initially(self):
page = self.create_search_page()
self.assertFalse(page.is_full())
def test_is_full_does_not_contact_next_page_if_pattern_is_not_matched(self):
page = self.create_search_page()
page.is_full()
self.next_page.is_full.assert_not_called()
def test_is_full_forwarded_to_next_page_after_pattern_has_been_matched(self):
page = self.create_search_page('the.*pattern')
page.add_line('this matches the regex pattern')
page.is_full()
self.next_page.is_full.assert_called_once()
def test_flush_not_forwarded_if_pattern_is_not_matched(self):
page = self.create_search_page()
page.flush()
self.next_page.flush.assert_not_called()
def test_flush_forwarded_to_next_page_after_pattern_has_been_matched(self):
page = self.create_search_page('the.*pattern')
page.add_line('this matches the regex pattern')
page.flush()
self.next_page.flush.assert_called_once()
```
|
{
"source": "JeroenDM/acrobotics",
"score": 2
}
|
#### File: acrobotics/inverse_kinematics/spherical_wrist.py
```python
import numpy as np
from .ik_result import IKResult
def ik(T, tf_base) -> IKResult:
""" TODO add base frame correction
"""
Rbase = tf_base[:3, :3]
Ree = T[:3, :3]
Ree_rel = np.dot(Rbase.transpose(), Ree)
# ignore position
# n, s, a axes according to the convention of Siciliano
n = Ree_rel[:3, 0]
s = Ree_rel[:3, 1]
a = Ree_rel[:3, 2]
A = np.sqrt(a[0] ** 2 + a[1] ** 2)
# solution with theta2 in (0, pi)
t1_1 = np.arctan2(a[1], a[0])
t2_1 = np.arctan2(A, a[2])
t3_1 = np.arctan2(s[2], -n[2])
# solution with theta2 in (-pi, 0)
t1_2 = np.arctan2(-a[1], -a[0])
t2_2 = np.arctan2(-A, a[2])
t3_2 = np.arctan2(-s[2], n[2])
q_sol = np.zeros((2, 3))
q_sol[0, 0], q_sol[0, 1], q_sol[0, 2] = t1_1, t2_1, t3_1
q_sol[1, 0], q_sol[1, 1], q_sol[1, 2] = t1_2, t2_2, t3_2
return IKResult(True, q_sol)
```
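A minimal usage sketch of the wrist IK above (the import path `acrobotics.inverse_kinematics.spherical_wrist` is assumed from the file location, and the goal rotation is an arbitrary illustration):
```python
import numpy as np
from acrobotics.inverse_kinematics.spherical_wrist import ik  # assumed import path

# Desired end-effector orientation: 90 degrees about the y-axis (the position is ignored).
T_goal = np.eye(4)
T_goal[:3, :3] = np.array([[0.0, 0.0, 1.0],
                           [0.0, 1.0, 0.0],
                           [-1.0, 0.0, 0.0]])

result = ik(T_goal, tf_base=np.eye(4))  # base frame aligned with the world frame
if result.success:
    for q in result.solutions:
        print(q)  # two (theta1, theta2, theta3) candidates
```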
#### File: src/acrobotics/link.py
```python
import numpy as np
import casadi as ca
from collections import namedtuple
from enum import Enum
DHLink = namedtuple("DHLink", ["a", "alpha", "d", "theta"])
class JointType(Enum):
revolute = "r"
prismatic = "p"
class LinkKinematics:
""" Robot link according to the Denavit-Hartenberg convention. """
def __init__(self, dh_parameters: DHLink, joint_type: JointType):
""" Creates a linkf from Denavit-Hartenberg parameters,
a joint type ('r' for revolute, 'p' for prismatic) and
a Scene of Shapes representing the geometry.
"""
self.dh = dh_parameters
if joint_type in JointType:
self.joint_type = joint_type
else:
raise ValueError(f"Unkown JointType: {joint_type}.")
# cache a transform because creating it is slow,
# but filling in an existing one is fast
self._T = np.eye(4)
def get_link_relative_transform(self, qi):
""" transformation matrix from link i relative to i-1
Links and joints are numbered from 1 to ndof, but python
indexing of these links goes from 0 to ndof-1!
"""
if self.joint_type == JointType.revolute:
a, alpha, d, theta = self.dh.a, self.dh.alpha, self.dh.d, qi
elif self.joint_type == JointType.prismatic:
a, alpha, d, theta = self.dh.a, self.dh.alpha, qi, self.dh.theta
c_theta = np.cos(theta)
s_theta = np.sin(theta)
c_alpha = np.cos(alpha)
s_alpha = np.sin(alpha)
T = self._T
T[0, 0], T[0, 1] = c_theta, -s_theta * c_alpha
T[0, 2], T[0, 3] = s_theta * s_alpha, a * c_theta
T[1, 0], T[1, 1] = s_theta, c_theta * c_alpha
T[1, 2], T[1, 3] = -c_theta * s_alpha, a * s_theta
T[2, 1], T[2, 2], T[2, 3] = s_alpha, c_alpha, d
return T
def get_link_relative_transform_casadi(self, qi):
""" Link transform according to the Denavit-Hartenberg convention.
Casadi compatible function.
"""
if self.joint_type == JointType.revolute:
a, alpha, d, theta = self.dh.a, self.dh.alpha, self.dh.d, qi
elif self.joint_type == JointType.prismatic:
a, alpha, d, theta = self.dh.a, self.dh.alpha, qi, self.dh.theta
c_t, s_t = ca.cos(theta), ca.sin(theta)
c_a, s_a = ca.cos(alpha), ca.sin(alpha)
row1 = ca.hcat([c_t, -s_t * c_a, s_t * s_a, a * c_t])
row2 = ca.hcat([s_t, c_t * c_a, -c_t * s_a, a * s_t])
row3 = ca.hcat([0, s_a, c_a, d])
row4 = ca.hcat([0, 0, 0, 1])
return ca.vcat([row1, row2, row3, row4])
class Link(LinkKinematics):
def __init__(self, dh_parameters, joint_type, geometry):
super().__init__(dh_parameters, joint_type)
self.geometry = geometry
def plot(self, ax, tf, *arg, **kwarg):
self.geometry.plot(ax, tf=tf, *arg, **kwarg)
```
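For reference, a short sketch of how the link class above could be used (the DH values are arbitrary, and the import path `acrobotics.link` is assumed from the file location):
```python
import numpy as np
from acrobotics.link import DHLink, JointType, LinkKinematics  # assumed import path

# Arbitrary DH parameters (a, alpha, d, theta) for a single revolute link.
link = LinkKinematics(DHLink(a=0.5, alpha=np.pi / 2, d=0.1, theta=0.0),
                      JointType.revolute)

# Relative transform of this link for a joint angle of 30 degrees.
T = link.get_link_relative_transform(np.deg2rad(30.0))
print(T)  # 4x4 homogeneous transform (the same cached array is reused on every call)
```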
#### File: acrobotics/path/factory.py
```python
import numpy as np
from copy import deepcopy
from typing import List
from numpy.linalg import norm
from acrolib.quaternion import Quaternion
from .path_pt import TolPositionPt, PathPt
def check_num_points(num_points: int):
if num_points < 2:
raise Exception(f"Value of num_points must be 2 or more, not {num_points}.")
def create_line(start_pt: PathPt, end_pos: np.ndarray, num_points: int) -> List[PathPt]:
""" Copy a given toleranced PathPt along a straight line."""
check_num_points(num_points)
trans_vec = (end_pos - start_pt.pos) / (num_points - 1)
path = [start_pt]
for _ in range(num_points - 1):
new_pt = deepcopy(path[-1])
new_pt.translate(trans_vec)
path.append(new_pt)
return path
def create_circle(
start_pt: PathPt, mid_point: np.ndarray, rotation_axis: np.ndarray, num_points: int
):
"""Copy a given toleranced PathPt along a circle with a given mid point and rotation axis."""
check_num_points(num_points)
return create_arc(start_pt, mid_point, rotation_axis, 2 * np.pi, num_points)
def create_arc(
start_pt: PathPt, mid_point: np.ndarray, rotation_axis, angle, num_points
):
"""Copy a given toleranced PathPt along an arc with a given mid point and rotation axis."""
check_num_points(num_points)
rotation_axis = rotation_axis / norm(rotation_axis)
rotating_vector = start_pt.pos - mid_point
a = np.linspace(angle / (num_points - 1), angle, num_points - 1)
path = [deepcopy(start_pt)]
for ai in a:
rot_mat = Quaternion(angle=ai, axis=rotation_axis).rotation_matrix
offset = (rot_mat @ rotating_vector) - rotating_vector
new_pt = deepcopy(start_pt)
new_pt.translate(offset)
new_pt.rotate(rot_mat)
path.append(new_pt)
return path
```
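A sketch of how these factory functions could be called, using the toleranced point types that appear in the tests further down (`TolPositionPt`, `NoTolerance`); that `create_line` is re-exported at the package top level like `create_arc` is an assumption here:
```python
import numpy as np
import acrobotics as ab
from acrolib.quaternion import Quaternion

quat = Quaternion(axis=np.array([1, 0, 0]), angle=np.pi)
start = ab.TolPositionPt(np.array([0.9, -0.2, 0.2]), quat, 3 * [ab.NoTolerance()])

# Straight line of 5 points, translating the start point towards y = +0.2.
line = ab.create_line(start, np.array([0.9, 0.2, 0.2]), 5)

# Quarter arc of 5 points around a z-axis through (0.9, 0.0, 0.2).
arc = ab.create_arc(start, np.array([0.9, 0.0, 0.2]), np.array([0, 0, 1]), np.pi / 2, 5)
```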
#### File: acrobotics/planning/settings.py
```python
from numpy import ndarray
from typing import List
from ..path.sampling import SamplingSetting
from .types import SolveMethod, CostFuntionType
class OptSettings:
"""
Settings for the numerical optimization based planners.
"""
def __init__(
self,
q_init: ndarray = None,
max_iters: int = None,
weights: List[float] = None,
con_objective_weight=0.0,
):
# q_init is handled once we know the path length and the ndof of the robot
self.q_init = q_init
self.weights = weights
self.con_objective_weight = con_objective_weight
if max_iters is None:
self.max_iters = 100
else:
self.max_iters = max_iters
class SolverSettings:
def __init__(
self,
solve_method: SolveMethod,
cost_function_type: CostFuntionType,
sampling_settings: SamplingSetting = None,
opt_settings: OptSettings = None,
):
self.solve_method = solve_method
self.cost_function_type = cost_function_type
if solve_method == SolveMethod.sampling_based:
assert sampling_settings is not None
self.sampling_settings = sampling_settings
# fill in the correct cost function based on the type
elif solve_method == SolveMethod.optimization_based:
assert opt_settings is not None
self.opt_settings = opt_settings
```
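For reference, a typical way to combine these settings objects, mirroring the sampling-based test code later in this document:
```python
import acrobotics as ab
from acrolib.sampling import SampleMethod

sampling = ab.SamplingSetting(
    ab.SearchStrategy.INCREMENTAL,
    sample_method=SampleMethod.random_uniform,
    num_samples=500,
    iterations=2,
    tolerance_reduction_factor=2,
)
settings = ab.SolverSettings(
    ab.SolveMethod.sampling_based,
    ab.CostFuntionType.sum_squared,
    sampling_settings=sampling,
)
# `settings` is then passed to ab.solve(planning_setup, settings).
```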
#### File: src/acrobotics/robot.py
```python
import numpy as np
import casadi as ca
from abc import ABC
from collections import namedtuple
from matplotlib import animation
from typing import List, Callable
from acrobotics.geometry import Scene
from acrobotics.link import Link
from acrolib.plotting import plot_reference_frame
JointLimit = namedtuple("JointLimit", ["lower", "upper"])
class IKResult:
def __init__(self, success: bool, solutions: List[np.ndarray] = None):
self.success = success
if self.success:
assert solutions is not None
self.solutions = solutions
class Tool(Scene):
""" Geometric shapes with added atribute tool tip transform tf_tt
relative to the last link.
"""
def __init__(self, shapes, tf_shapes, tf_tool_tip):
"""
tf_tool_tip: tool tip transform relative to the robot's last link.
"""
super().__init__(shapes, tf_shapes)
self.tf_tool_tip = tf_tool_tip
class RobotKinematics:
""" Robot kinematics and shape
(initial joint values not implemented)
"""
def __init__(self, links: List[Link], joint_limits: List[JointLimit] = None):
self.links = links
self.ndof = len(links)
# set default joint limits
if joint_limits is None:
self.joint_limits = [JointLimit(-np.pi, np.pi)] * self.ndof
else:
self.joint_limits = joint_limits
# pose of the base with respect to the global reference frame;
# this is independent of the base geometry and applies to the whole robot
self.tf_base = np.eye(4)
# pose of the tool tip relative to the robot's last link
self.tf_tool_tip = None
def fk(self, q) -> np.ndarray:
""" Return end effector frame, either last link, or tool frame
if tool available
"""
T = self.tf_base
for i in range(0, self.ndof):
Ti = self.links[i].get_link_relative_transform(q[i])
T = T @ Ti
if self.tf_tool_tip is not None:
T = np.dot(T, self.tf_tool_tip)
return T
def fk_rpy(self, q) -> np.ndarray:
T = self.fk(q)
s = np.sqrt(T[0, 0] * T[0, 0] + T[0, 1] * T[0, 1])
r_x = np.arctan2(-T[1, 2], T[2, 2])
r_y = np.arctan2(T[0, 2], s)
r_z = np.arctan2(-T[0, 1], T[2, 2])
out = np.zeros(6)
out[:3] = T[:3, 3]
out[3:] = [r_x, r_y, r_z]
return out
def fk_all_links(self, q) -> List[np.ndarray]:
""" Return link frames (not base or tool)
"""
tf_links = []
T = self.tf_base
for i in range(0, self.ndof):
Ti = self.links[i].get_link_relative_transform(q[i])
T = T @ Ti
tf_links.append(T)
return tf_links
def ik(self, transformation_matrix) -> IKResult:
raise NotImplementedError
def estimate_max_extension(self):
max_ext = 0
for link in self.links:
max_ext += abs(link.dh.a) + abs(link.dh.d)
return max_ext
def plot_kinematics(self, ax, q, *arg, **kwarg):
# base frame (0)
plot_reference_frame(ax, self.tf_base)
# link frames (1-ndof)
tf_links = self.fk_all_links(q)
points = [tf[0:3, 3] for tf in tf_links]
points = np.array(points)
points = np.vstack((self.tf_base[0:3, 3], points))
ax.plot(points[:, 0], points[:, 1], points[:, 2], "o-")
for tfi in tf_links:
plot_reference_frame(ax, tfi)
# tool tip frame
if self.tf_tool_tip is not None:
tf_tt = np.dot(tf_links[-1], self.tf_tool_tip)
plot_reference_frame(ax, tf_tt)
class RobotCasadiKinematics(ABC):
ndof: int
links: List[Link]
tf_base: np.ndarray
tf_tool_tip: np.ndarray
jacobian_position_fun: Callable
jacobian_rpy_fun: Callable
def fk_casadi(self, q):
T = self.tf_base
for i in range(0, self.ndof):
Ti = self.links[i].get_link_relative_transform_casadi(q[i])
T = T @ Ti
if self.tf_tool_tip is not None:
T = T @ self.tf_tool_tip
return T
def fk_rpy_casadi(self, q):
T = self.fk_casadi(q)
s = ca.sqrt(T[0, 0] * T[0, 0] + T[0, 1] * T[0, 1])
r_x = ca.arctan2(-T[1, 2], T[2, 2])
r_y = ca.arctan2(T[0, 2], s)
r_z = ca.arctan2(-T[0, 1], T[2, 2])
return ca.vcat([T[:3, 3], r_x, r_y, r_z])
def fk_all_links_casadi(self, q):
""" Return link frames (not base or tool)
"""
tf_links = []
T = self.tf_base
for i in range(0, self.ndof):
Ti = self.links[i].get_link_relative_transform_casadi(q[i])
T = T @ Ti
tf_links.append(T)
return tf_links
def jacobian_position(self, q):
return self.jacobian_position_fun(q)
def jacobian_rpy(self, q):
return self.jacobian_rpy_fun(q)
def _create_jacobian_position(self):
q = ca.MX.sym("q", self.ndof)
jac = ca.jacobian(self.fk_casadi(q)[:3, 3], q)
return ca.Function("jac_fun", [q], [jac])
def _create_jacobian_rpy(self):
q = ca.MX.sym("q", self.ndof)
jac = ca.jacobian(self.fk_rpy_casadi(q), q)
return ca.Function("jac_fun", [q], [jac])
class Robot(RobotKinematics, RobotCasadiKinematics):
def __init__(self, links, joint_limits=None):
super().__init__(links, joint_limits)
# by default there is no fixed base geometry and no tool
self.geometry_base = None
self.geometry_tool = None
self.do_check_self_collision = True
# self collision matrix
# default: do not check link neighbours, create band structure matrix
temp = np.ones((self.ndof, self.ndof), dtype="bool")
self.collision_matrix = np.tril(temp, k=-3) + np.triu(temp, k=3)
# keep track of the links most likely to be in collision
self.collision_priority = list(range(self.ndof))
# loggers to get performance criteria
self.cc_checks = 0
self.jacobian_position_fun = self._create_jacobian_position()
self.jacobian_rpy_fun = self._create_jacobian_rpy()
@property
def tool(self):
return self.geometry_tool
@tool.setter
def tool(self, new_tool: Tool):
self.tf_tool_tip = new_tool.tf_tool_tip
self.geometry_tool = new_tool
def _check_self_collision(self, tf_links, geom_links):
for i, ti, gi in zip(range(self.ndof), tf_links, geom_links):
for j, tj, gj in zip(range(self.ndof), tf_links, geom_links):
if self.collision_matrix[i, j]:
if gi.is_in_collision(gj, tf_self=ti, tf_other=tj):
return True
# do not check tool against the last link where it is mounted
if self.geometry_tool is not None:
tf_tool = tf_links[-1]
for tf_link, geom_link in zip(tf_links[:-1], geom_links[:-1]):
if geom_link.is_in_collision(
self.geometry_tool, tf_self=tf_link, tf_other=tf_tool
):
return True
return False
def _is_in_limits(self, q):
for qi, limit in zip(q, self.joint_limits):
if qi > limit.upper or qi < limit.lower:
return False
return True
@staticmethod
def _linear_interpolation_path(q_start, q_goal, max_q_step):
q_start, q_goal = np.array(q_start), np.array(q_goal)
q_diff = np.linalg.norm(q_goal - q_start)
num_steps = int(np.ceil(q_diff / max_q_step))
S = np.linspace(0, 1, num_steps)
return [(1 - s) * q_start + s * q_goal for s in S]
def is_in_self_collision(self, q):
geom_links = [l.geometry for l in self.links]
tf_links = self.fk_all_links(q)
return self._check_self_collision(tf_links, geom_links)
def is_in_collision(self, q, collection):
self.cc_checks += 1
if collection is not None:
geom_links = [l.geometry for l in self.links]
tf_links = self.fk_all_links(q)
# check collision with tool first
if self.geometry_tool is not None:
tf_tool = tf_links[-1]
if self.geometry_tool.is_in_collision(collection, tf_self=tf_tool):
return True
# check collision of fixed base geometry
base = self.geometry_base
if base is not None:
if base.is_in_collision(collection, tf_self=self.tf_base):
return True
# check collision for all links
for i in self.collision_priority:
if geom_links[i].is_in_collision(collection, tf_self=tf_links[i]):
# move current index to front of priority list
self.collision_priority.remove(i)
self.collision_priority.insert(0, i)
return True
if self.do_check_self_collision:
if self._check_self_collision(tf_links, geom_links):
return True
return False
def is_path_in_collision_discrete(
self, q_start, q_goal, collection, max_q_step=0.1
):
""" Check for collision with linear interpolation between start and goal.
"""
for q in self._linear_interpolation_path(q_start, q_goal, max_q_step):
if self.is_in_collision(q, collection):
return True
return False
def is_path_in_collision(self, q_start, q_goal, collection: Scene):
""" Check for collision using the continuous collision checking
stuff from fcl.
- We do not check for self collision on a path.
- Base is assumed not to move.
"""
geom_links = [l.geometry for l in self.links]
tf_links = self.fk_all_links(q_start)
tf_links_target = self.fk_all_links(q_goal)
# check collision with tool first
if self.geometry_tool is not None:
if self.geometry_tool.is_path_in_collision(
tf_links[-1], tf_links_target[-1], collection
):
return True
# Base is assumed to be always fixed
base = self.geometry_base
if base is not None:
if base.is_in_collision(collection, tf_self=self.tf_base):
return True
# check collision for all links
for i in self.collision_priority:
if geom_links[i].is_path_in_collision(
tf_links[i], tf_links_target[i], collection
):
# move current index to front of priority list
self.collision_priority.remove(i)
self.collision_priority.insert(0, i)
return True
return False
def plot(self, ax, q, *arg, **kwarg):
if self.geometry_base is not None:
self.geometry_base.plot(ax, self.tf_base, *arg, **kwarg)
tf_links = self.fk_all_links(q)
for i, link in enumerate(self.links):
link.plot(ax, tf_links[i], *arg, **kwarg)
if self.geometry_tool is not None:
self.geometry_tool.plot(ax, tf=tf_links[-1], *arg, **kwarg)
def plot_path(self, ax, joint_space_path):
alpha = np.linspace(1, 0.2, len(joint_space_path))
for i, qi in enumerate(joint_space_path):
self.plot(ax, qi, c=(0.1, 0.2, 0.5, alpha[i]))
def animate_path(self, fig, ax, joint_space_path):
def get_empty_lines(ax):
lines = []
for l in self.links:
for s in l.geometry.shapes:
lines.append(s.get_empty_plot_lines(ax, c=(0.1, 0.2, 0.5)))
if self.geometry_tool is not None:
for s in self.geometry_tool.shapes:
lines.append(s.get_empty_plot_lines(ax, c=(0.1, 0.2, 0.5)))
return lines
def update_lines(frame, q_path, lines):
tfs = self.fk_all_links(q_path[frame])
cnt = 0
for tf_l, l in zip(tfs, self.links):
for tf_s, s in zip(l.geometry.tf_s, l.geometry.s):
Ti = np.dot(tf_l, tf_s)
lines[cnt] = s.update_plot_lines(lines[cnt], Ti)
cnt = cnt + 1
if self.geometry_tool is not None:
for tf_s, s in zip(self.geometry_tool.tf_s, self.geometry_tool.s):
tf_j = np.dot(tfs[-1], tf_s)
lines[cnt] = s.update_plot_lines(lines[cnt], tf_j)
cnt = cnt + 1
ls = get_empty_lines(ax)
N = len(joint_space_path)
self.animation = animation.FuncAnimation(
fig, update_lines, N, fargs=(joint_space_path, ls), interval=200, blit=False
)
```
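A brief sketch of the kinematics and collision-checking interface, using the example Kuka robot and the helper imports that also appear in the tests below:
```python
import numpy as np
from acrolib.geometry import translation
from acrobotics.robot_examples import Kuka
from acrobotics.geometry import Scene
from acrobotics.shapes import Box

robot = Kuka()
q_home = np.zeros(6)

T_ee = robot.fk(q_home)        # 4x4 end-effector pose
print(T_ee[:3, 3])             # end-effector position

# A simple scene with a single box below the robot.
scene = Scene([Box(2, 2, 0.1)], [translation(0, 0, -0.2)])
print(robot.is_in_collision(q_home, scene))
```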
#### File: src/acrobotics/workspace_envelope.py
```python
import numpy as np
from .robot import Robot
from .inverse_kinematics.ik_result import IKResult
from tqdm import tqdm
from pyquaternion import Quaternion
class EnvelopeSettings:
def __init__(
self,
sample_distance: float,
num_orientation_samples: int,
max_ik_solutions: int,
):
self.sample_distance = sample_distance
self.num_orientation_samples = num_orientation_samples
self.max_ik_solutions = max_ik_solutions
def sample_position(position: np.ndarray, n: int):
""" Return n random transforms at the given position. """
tf_samples = [Quaternion.random().transformation_matrix for _ in range(n)]
for tfi in tf_samples:
tfi[:3, 3] = position
return tf_samples
def process_ik_solution(robot: Robot, ik_solution: IKResult):
""" Return the number of collision free ik_solutions. """
if ik_solution.success:
q_collision_free = []
for qi in ik_solution.solutions:
if not robot.is_in_self_collision(qi):
q_collision_free.append(qi)
return len(q_collision_free)
else:
return 0
def calculate_reachability(
robot: Robot,
position: np.ndarray,
num_samples: int = 100,
max_ik_solutions: int = 8,
):
"""
Return the fraction of reachable poses at a given position by solving
the inverse kinematics for uniform random orientation samples.
"""
sampled_transforms = sample_position(position, n=num_samples)
reachable_cnt = 0
for transfom in sampled_transforms:
ik_sol = robot.ik(transfom)
reachable_cnt += process_ik_solution(robot, ik_sol)
return reachable_cnt / (max_ik_solutions * num_samples)
def scale(X, from_range, to_range):
Y = (X - from_range[0]) / (from_range[1] - from_range[0])
Y = Y * (to_range[1] - to_range[0]) + to_range[0]
return Y
def generate_positions(max_extension, num_points: int):
steps = np.linspace(-max_extension, max_extension, num_points)
x, y, z = np.meshgrid(steps, steps, steps)
return np.vstack((x.flatten(), y.flatten(), z.flatten())).T
# return np.stack((x, y, z))
def generate_robot_envelope(robot: Robot, settings: EnvelopeSettings):
max_extension = robot.estimate_max_extension()
num_points = int(2 * max_extension / settings.sample_distance)
points = generate_positions(max_extension, num_points)
result = np.zeros((len(points), 4))
for i, point in enumerate(tqdm(points)):
reachability = calculate_reachability(
robot, point, settings.num_orientation_samples, settings.max_ik_solutions
) # TODO: other params
result[i, 0] = reachability
result[i, 1:] = point
return result
```
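Example of generating the reachability data with the function above; the small sample counts follow the workspace-envelope test at the end of this entry and keep the run short:
```python
from acrobotics.robot_examples import Kuka
from acrobotics.workspace_envelope import EnvelopeSettings, generate_robot_envelope

robot = Kuka()
settings = EnvelopeSettings(
    sample_distance=1.0,         # grid spacing between sampled positions
    num_orientation_samples=10,  # random orientations tested per position
    max_ik_solutions=8,          # normalisation factor for the reachability score
)
envelope = generate_robot_envelope(robot, settings)
print(envelope.shape)  # one row per grid point: [reachability, x, y, z]
```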
#### File: acrobotics/tests/test_continuous_cc.py
```python
import time
import fcl
import numpy as np
import matplotlib.pyplot as plt
from acrolib.plotting import get_default_axes3d, plot_reference_frame
from acrolib.geometry import translation
from acrobotics.robot_examples import Kuka
from acrobotics.tool_examples import torch2
from acrobotics.geometry import Scene
from acrobotics.shapes import Box
robot = Kuka()
robot.tool = torch2
DEBUG = False
def show_animation(robot, scene, qa, qb):
q_path = np.linspace(qa, qb, 10)
fig, ax = get_default_axes3d([-0.8, 0.8], [0, 1.6], [-0.2, 1.4])
ax.set_axis_off()
ax.view_init(elev=31, azim=-15)
scene.plot(ax, c="green")
robot.animate_path(fig, ax, q_path)
plt.show()
def test_ccd_1():
table = Box(2, 2, 0.1)
T_table = translation(0, 0, -0.2)
obstacle = Box(0.01, 0.01, 1.5)
T_obs = translation(0, 0.5, 0.55)
scene = Scene([table, obstacle], [T_table, T_obs])
q_start = np.array([1.0, 1.5, -0.3, 0, 0, 0])
q_goal = np.array([2.0, 1.5, 0.3, 0, 0, 0])
res = robot.is_path_in_collision(q_start, q_goal, scene)
assert res
if DEBUG:
print("resut test 1: ", res)
show_animation(robot, scene, q_start, q_goal)
def test_ccd_2():
table = Box(2, 2, 0.1)
T_table = translation(0, 0, -0.2)
obstacle = Box(0.2, 0.1, 0.01)
T_obs = translation(0, 0.9, 0.55)
scene = Scene([table, obstacle], [T_table, T_obs])
q_start = np.array([1.5, 1.5, -0.3, 0, 0.3, 0])
q_goal = np.array([1.5, 1.5, 0.3, 0, -0.3, 0])
res = robot.is_path_in_collision(q_start, q_goal, scene)
assert res
if DEBUG:
print("resut test 2: ", res)
show_animation(robot, scene, q_start, q_goal)
def test_ccd_3():
table = Box(2, 2, 0.1)
T_table = translation(0, 0, -0.2)
obstacle = Box(0.01, 0.2, 0.2)
T_obs = translation(0, 1.2, 0)
scene = Scene([table, obstacle], [T_table, T_obs])
q_start = np.array([1.0, 1.2, -0.5, 0, 0, 0])
q_goal = np.array([2.0, 1.2, -0.5, 0, 0, 0])
res = robot.is_path_in_collision(q_start, q_goal, scene)
assert res
if DEBUG:
print("resut test 3: ", res)
show_animation(robot, scene, q_start, q_goal)
if __name__ == "__main__":
test_ccd_1()
test_ccd_2()
test_ccd_3()
```
#### File: acrobotics/tests/test_planning_sampling_based.py
```python
import pytest
import numpy as np
import acrobotics as ab
# import matplotlib.pyplot as plt
from numpy.testing import assert_almost_equal
from acrolib.quaternion import Quaternion
from acrolib.sampling import SampleMethod
from acrolib.plotting import get_default_axes3d, plot_reference_frame
from acrobotics.robot import Robot
class DummyRobot(Robot):
def __init__(self, is_colliding=False):
self.is_colliding = is_colliding
self.ndof = 6
def is_in_collision(self, joint_position, scene=None):
return self.is_colliding
def fk(self, q):
tf = np.eye(4)
tf[:3, 3] = np.array([1, -2.09, 3])
return tf
def test_tol_quat_pt_with_weights():
path_ori_free = []
for s in np.linspace(0, 1, 3):
xi = 0.8
yi = s * 0.2 + (1 - s) * (-0.2)
zi = 0.2
path_ori_free.append(
ab.TolQuatPt(
[xi, yi, zi],
Quaternion(axis=[1, 0, 0], angle=np.pi),
[ab.NoTolerance(), ab.NoTolerance(), ab.NoTolerance()],
ab.QuaternionTolerance(2.0),
)
)
table = ab.Box(0.5, 0.5, 0.1)
table_tf = np.array(
[[1, 0, 0, 0.80], [0, 1, 0, 0.00], [0, 0, 1, 0.12], [0, 0, 0, 1]]
)
scene1 = ab.Scene([table], [table_tf])
robot = ab.Kuka()
# robot.tool = torch
setup = ab.PlanningSetup(robot, path_ori_free, scene1)
# weights to express the importance of the joints in the cost function
joint_weights = [10.0, 5.0, 1.0, 1.0, 1.0, 1.0]
settings = ab.SamplingSetting(
ab.SearchStrategy.INCREMENTAL,
sample_method=SampleMethod.random_uniform,
num_samples=500,
iterations=2,
tolerance_reduction_factor=2,
weights=joint_weights,
)
solve_set = ab.SolverSettings(
ab.SolveMethod.sampling_based,
ab.CostFuntionType.weighted_sum_squared,
sampling_settings=settings,
)
sol = ab.solve(setup, solve_set)
assert sol.success
for qi, s in zip(sol.joint_positions, np.linspace(0, 1, 3)):
xi = 0.8
yi = s * 0.2 + (1 - s) * (-0.2)
zi = 0.2
fk = robot.fk(qi)
pos_fk = fk[:3, 3]
assert_almost_equal(pos_fk, np.array([xi, yi, zi]))
# fig, ax = get_default_axes3d()
# scene1.plot(ax, c="g")
# robot.animate_path(fig, ax, sol.joint_positions)
# plt.show(block=True)
def test_tol_position_pt_planning_problem():
robot = ab.Kuka()
table = ab.Box(0.5, 0.5, 0.1)
table_tf = np.array(
[[1, 0, 0, 0.80], [0, 1, 0, 0.00], [0, 0, 1, 0.12], [0, 0, 0, 1]]
)
scene1 = ab.Scene([table], [table_tf])
# create path
quat = Quaternion(axis=np.array([1, 0, 0]), angle=np.pi)
tolerance = [ab.NoTolerance(), ab.SymmetricTolerance(0.05, 10), ab.NoTolerance()]
first_point = ab.TolPositionPt(np.array([0.9, -0.2, 0.2]), quat, tolerance)
# end_position = np.array([0.9, 0.2, 0.2])
# path = create_line(first_point, end_position, 5)
path = ab.create_arc(
first_point, np.array([0.9, 0.0, 0.2]), np.array([0, 0, 1]), 2 * np.pi, 5
)
planner_settings = ab.SamplingSetting(ab.SearchStrategy.GRID, iterations=1)
solver_settings = ab.SolverSettings(
ab.SolveMethod.sampling_based,
ab.CostFuntionType.sum_squared,
sampling_settings=planner_settings,
)
setup = ab.PlanningSetup(robot, path, scene1)
sol = ab.solve(setup, solver_settings)
assert sol.success
for qi, pt in zip(sol.joint_positions, path):
fk = robot.fk(qi)
pos_fk = fk[:3, 3]
pos_pt = pt.pos
R_pt = pt.rotation_matrix
pos_error = R_pt.T @ (pos_fk - pos_pt)
assert_almost_equal(pos_error[0], 0)
assert_almost_equal(pos_error[2], 0)
assert pos_error[1] <= (0.05 + 1e-6)
assert pos_error[1] >= -(0.05 + 1e-6)
# TODO fix this test
def test_euler_pt_planning_problem():
robot = ab.Kuka()
table = ab.Box(0.5, 0.5, 0.1)
table_tf = np.array(
[[1, 0, 0, 0.80], [0, 1, 0, 0.00], [0, 0, 1, 0.00], [0, 0, 0, 1]]
)
scene1 = ab.Scene([table], [table_tf])
# create path
quat = Quaternion(axis=np.array([1, 0, 0]), angle=-3 * np.pi / 4)
pos_tol = 3 * [ab.NoTolerance()]
# rot_tol = 3 * [NoTolerance()]
rot_tol = [
ab.NoTolerance(),
ab.SymmetricTolerance(np.pi / 4, 20),
ab.SymmetricTolerance(np.pi, 20),
]
first_point = ab.TolEulerPt(np.array([0.9, -0.1, 0.2]), quat, pos_tol, rot_tol)
# end_position = np.array([0.9, 0.1, 0.2])
# path = create_line(first_point, end_position, 5)
path = ab.create_arc(
first_point, np.array([0.9, 0.0, 0.2]), np.array([0, 0, 1]), 2 * np.pi, 5
)
planner_settings = ab.SamplingSetting(
ab.SearchStrategy.GRID, iterations=1, tolerance_reduction_factor=2
)
solver_settings = ab.SolverSettings(
ab.SolveMethod.sampling_based,
ab.CostFuntionType.sum_squared,
sampling_settings=planner_settings,
)
setup = ab.PlanningSetup(robot, path, scene1)
sol = ab.solve(setup, solver_settings)
assert sol.success
# fig, ax = get_default_axes3d()
# scene1.plot(ax, c="g")
# path_tf = [pt.transformation_matrix for pt in path]
# for tf in path_tf:
# plot_reference_frame(ax, tf, 0.1)
# # for tf in path[0].sample_grid():
# # plot_reference_frame(ax, tf, 0.1)
# fk_tfs = [robot.fk(qi) for qi in sol.joint_positions]
# for tf in fk_tfs:
# plot_reference_frame(ax, tf, 0.1)
# ax.set_axis_off()
# robot.animate_path(fig, ax, sol.joint_positions)
# plt.show(block=True)
def test_state_cost():
robot = ab.Kuka()
table = ab.Box(0.5, 0.5, 0.1)
table_tf = np.array(
[[1, 0, 0, 0.80], [0, 1, 0, 0.00], [0, 0, 1, 0.00], [0, 0, 0, 1]]
)
scene1 = ab.Scene([table], [table_tf])
# create path
quat = Quaternion(axis=np.array([1, 0, 0]), angle=-3 * np.pi / 4)
pos_tol = 3 * [ab.NoTolerance()]
# rot_tol = 3 * [NoTolerance()]
rot_tol = [
ab.NoTolerance(),
ab.SymmetricTolerance(np.pi / 4, 20),
ab.SymmetricTolerance(np.pi, 20),
]
first_point = ab.TolEulerPt(np.array([0.9, -0.1, 0.2]), quat, pos_tol, rot_tol)
# end_position = np.array([0.9, 0.1, 0.2])
# path = create_line(first_point, end_position, 5)
path = ab.create_arc(
first_point, np.array([0.9, 0.0, 0.2]), np.array([0, 0, 1]), 2 * np.pi, 5
)
planner_settings = ab.SamplingSetting(
ab.SearchStrategy.GRID,
iterations=1,
tolerance_reduction_factor=2,
use_state_cost=True,
state_cost_weight=10.0,
)
solver_settings = ab.SolverSettings(
ab.SolveMethod.sampling_based,
ab.CostFuntionType.sum_squared,
sampling_settings=planner_settings,
)
setup = ab.PlanningSetup(robot, path, scene1)
sol = ab.solve(setup, solver_settings)
assert sol.success
def test_exceptions():
settings = ab.SamplingSetting(
ab.SearchStrategy.INCREMENTAL,
sample_method=SampleMethod.random_uniform,
num_samples=500,
iterations=2,
tolerance_reduction_factor=2,
)
solve_set = ab.SolverSettings(
ab.SolveMethod.sampling_based,
ab.CostFuntionType.weighted_sum_squared,
sampling_settings=settings,
)
setup = ab.PlanningSetup(None, None, None)
with pytest.raises(Exception) as e:
ab.solve(setup, solve_set)
assert (
str(e.value)
== "No weights specified in SamplingSettings for the weighted cost function."
)
robot = ab.Kuka()
scene = ab.Scene([], [])
pos = np.array([1000, 0, 0])
quat = Quaternion(axis=np.array([1, 0, 0]), angle=-3 * np.pi / 4)
path = [ab.TolPositionPt(pos, quat, 3 * [ab.NoTolerance()])]
solve_set2 = ab.SolverSettings(
ab.SolveMethod.sampling_based,
ab.CostFuntionType.sum_squared,
sampling_settings=settings,
)
setup2 = ab.PlanningSetup(robot, path, scene)
with pytest.raises(Exception) as e:
ab.solve(setup2, solve_set2)
assert str(e.value) == f"No valid joint solutions for path point {0}."
```
#### File: acrobotics/tests/test_shapes.py
```python
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
from numpy.testing import assert_almost_equal
from acrolib.geometry import rot_z, rot_y
from acrobotics.shapes import Box, Cylinder
tf_identity = np.eye(4)
def pose_z(alfa, x, y, z):
""" Homogenous transform with rotation around z-axis and translation. """
return np.array(
[
[np.cos(alfa), -np.sin(alfa), 0, x],
[np.sin(alfa), np.cos(alfa), 0, y],
[0, 0, 1, z],
[0, 0, 0, 1],
]
)
class TestShape:
def test_init(self):
Box(1, 2, 3)
def test_get_vertices(self):
b = Box(1, 2, 3)
v = b.get_vertices(tf_identity)
desired = np.array(
[
[-0.5, 1, 1.5],
[-0.5, 1, -1.5],
[-0.5, -1, 1.5],
[-0.5, -1, -1.5],
[0.5, 1, 1.5],
[0.5, 1, -1.5],
[0.5, -1, 1.5],
[0.5, -1, -1.5],
]
)
assert_almost_equal(v, desired)
def test_get_normals(self):
b = Box(1, 2, 3)
n = b.get_normals(tf_identity)
desired = np.array(
[[1, 0, 0], [-1, 0, 0], [0, 1, 0], [0, -1, 0], [0, 0, 1], [0, 0, -1]]
)
assert_almost_equal(n, desired)
def test_set_transform(self):
b = Box(1, 2, 3)
tf = np.eye(4)
tf[0, 3] = 10.5
v = b.get_vertices(tf)
desired = np.array(
[
[10, 1, 1.5],
[10, 1, -1.5],
[10, -1, 1.5],
[10, -1, -1.5],
[11, 1, 1.5],
[11, 1, -1.5],
[11, -1, 1.5],
[11, -1, -1.5],
]
)
assert_almost_equal(v, desired)
def test_set_transform2(self):
b = Box(1, 2, 3)
tf = np.eye(4)
# rotate pi / 2 around x-axis
tf[1:3, 1:3] = np.array([[0, -1], [1, 0]])
v = b.get_vertices(tf)
desired = np.array(
[
[-0.5, -1.5, 1],
[-0.5, 1.5, 1],
[-0.5, -1.5, -1],
[-0.5, 1.5, -1],
[0.5, -1.5, 1],
[0.5, 1.5, 1],
[0.5, -1.5, -1],
[0.5, 1.5, -1],
]
)
assert_almost_equal(v, desired)
def test_get_edges(self):
b = Box(1, 2, 3)
e = b.get_edges(tf_identity)
row, col = e.shape
assert row == 12
assert col == 6
v = b.get_vertices(tf_identity)
# check only one edge
v0 = np.hstack((v[0], v[1]))
assert_almost_equal(v0, e[0])
def test_polyhedron(self):
b = Box(1, 2, 3)
A, b = b.get_polyhedron(np.eye(4))
Aa = np.array(
[[1, 0, 0], [-1, 0, 0], [0, 1, 0], [0, -1, 0], [0, 0, 1], [0, 0, -1]]
)
ba = np.array([0.5, 0.5, 1, 1, 1.5, 1.5])
assert_almost_equal(A, Aa)
assert_almost_equal(b, ba)
def test_polyhedron_transformed(self):
b = Box(1, 2, 3)
tf = pose_z(0.3, 0.1, 0.2, -0.3)
A, b = b.get_polyhedron(tf)
Aa = np.array(
[[1, 0, 0], [-1, 0, 0], [0, 1, 0], [0, -1, 0], [0, 0, 1], [0, 0, -1]]
)
ba = np.array([0.5, 0.5, 1, 1, 1.5, 1.5])
Aa = np.dot(Aa, tf[:3, :3].T)
ba = ba + np.dot(Aa, tf[:3, 3])
assert_almost_equal(A, Aa)
assert_almost_equal(b, ba)
def test_is_in_collision(self):
b1 = Box(1, 1, 1)
b2 = Box(1, 1, 2)
actual = b1.is_in_collision(tf_identity, b2, tf_identity)
assert actual == True
b3 = Box(1, 2, 1)
T3 = pose_z(np.pi / 4, 0.7, 0.7, 0)
assert b1.is_in_collision(tf_identity, b3, T3) == True
b4 = Box(1, 1, 1)
b5 = Box(1, 1, 2)
T4 = pose_z(0, -1, -1, 0)
T5 = pose_z(np.pi / 4, -2, -2, 0)
assert b4.is_in_collision(T4, b5, T5) == False
def test_plot(self):
b1 = Box(1, 2, 3)
fig = plt.figure()
ax = fig.gca(projection="3d")
b1.plot(ax, tf_identity)
assert True
class TestCylinder:
def test_4_faces(self):
cyl = Cylinder(1, 2, approx_faces=4)
n = cyl.get_normals(np.eye(4))
n_desired = np.array(
[[1, 0, 0], [0, 1, 0], [-1, 0, 0], [0, -1, 0], [0, 0, 1], [0, 0, -1]]
)
assert_almost_equal(n, n_desired)
v = cyl.get_vertices(np.eye(4))
v_desired = np.array(
[
[1, 0, 1],
[0, 1, 1],
[-1, 0, 1],
[0, -1, 1],
[1, 0, -1],
[0, 1, -1],
[-1, 0, -1],
[0, -1, -1],
]
)
v_desired = (rot_z(np.pi / 4) @ v_desired.T).T
assert_almost_equal(v, v_desired)
e = cyl.get_edges(np.eye(4))
e_desired = np.zeros((12, 6))
vd = v_desired
e_desired[0] = np.hstack((vd[3], vd[0]))
e_desired[1] = np.hstack((vd[0], vd[1]))
e_desired[2] = np.hstack((vd[1], vd[2]))
e_desired[3] = np.hstack((vd[2], vd[3]))
e_desired[4] = np.hstack((vd[7], vd[4]))
e_desired[5] = np.hstack((vd[4], vd[5]))
e_desired[6] = np.hstack((vd[5], vd[6]))
e_desired[7] = np.hstack((vd[6], vd[7]))
e_desired[8] = np.hstack((vd[0], vd[4]))
e_desired[9] = np.hstack((vd[1], vd[5]))
e_desired[10] = np.hstack((vd[2], vd[6]))
e_desired[11] = np.hstack((vd[3], vd[7]))
assert e.shape == e_desired.shape
assert_almost_equal(e[0:4], e_desired[0:4])
assert_almost_equal(e[4:8], e_desired[4:8])
assert_almost_equal(e[8:12], e_desired[8:12])
def test_4_faces_transformed(self):
tf = np.eye(4)
tf[:3, 3] = np.array([5, -3, 7])
tf[:3, :3] = rot_y(0.5) @ rot_z(-0.3)
cyl = Cylinder(1, 2, approx_faces=4)
n = cyl.get_normals(tf)
n_desired = np.array(
[[1, 0, 0], [0, 1, 0], [-1, 0, 0], [0, -1, 0], [0, 0, 1], [0, 0, -1]]
)
n_desired = (tf[:3, :3] @ n_desired.T).T
assert_almost_equal(n, n_desired)
v = cyl.get_vertices(tf)
v_desired = np.array(
[
[1, 0, 1],
[0, 1, 1],
[-1, 0, 1],
[0, -1, 1],
[1, 0, -1],
[0, 1, -1],
[-1, 0, -1],
[0, -1, -1],
]
)
v_desired = (rot_z(np.pi / 4) @ v_desired.T).T
v_desired = (tf[:3, :3] @ v_desired.T).T + tf[:3, 3]
assert_almost_equal(v, v_desired)
def test_plot_cylinder(self):
cyl = Cylinder(1, 2)
fig = plt.figure()
ax = fig.gca(projection="3d")
tf = np.eye(4)
tf[:3, 3] = np.array([0, 5, -3])
tf[:3, :3] = rot_y(np.pi / 4)
cyl.plot(ax, tf, c="k")
# plt.show(block=True)
assert True
```
#### File: acrobotics/tests/test_workspace_envelope.py
```python
import numpy as np
from numpy.testing import assert_almost_equal
from acrobotics.workspace_envelope import (
sample_position,
process_ik_solution,
calculate_reachability,
generate_positions,
generate_robot_envelope,
EnvelopeSettings,
)
from acrobotics.inverse_kinematics.ik_result import IKResult
from acrobotics.robot import Robot
from acrobotics.robot_examples import Kuka
def test_sample_position():
pos = np.array([0.1, 0.2, 0.3])
samples = sample_position(pos, 5)
for tf in samples:
assert_almost_equal(tf[:3, 3], pos)
for i in range(1, len(samples)):
assert np.any(np.not_equal(samples[i], samples[i - 1]))
class DummyRobot(Robot):
def __init__(self):
pass
def is_in_self_collision(self, q):
if q[0] < 0.5:
return False
else:
return True
def test_process_ik_result():
ik_result = IKResult(True, [[0, 0], [0, 0]])
robot = DummyRobot()
res = process_ik_solution(robot, ik_result)
assert res == 2
ik_result = IKResult(True, [[0, 0], [1, 1]])
robot = DummyRobot()
res = process_ik_solution(robot, ik_result)
assert res == 1
ik_result = IKResult(True, [[1, 1], [1, 1]])
robot = DummyRobot()
res = process_ik_solution(robot, ik_result)
assert res == 0
def test_generate_envelop():
robot = Kuka()
settings = EnvelopeSettings(1.0, 10, 8)
we = generate_robot_envelope(robot, settings)
max_extension = robot.estimate_max_extension()
num_points = int(2 * max_extension / settings.sample_distance)
assert we.shape == (num_points ** 3, 4)
```
|
{
"source": "JeroenDM/acrolib",
"score": 3
}
|
#### File: src/acrolib/plotting.py
```python
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
def get_default_axes3d(xlim=[-1, 1], ylim=[-1, 1], zlim=[-1, 1]):
""" Create a default `mpl_toolkits.mplot3d.Axes3D` object with default
axis limits on all axes from -1 to 1, and labels on the axes.
"""
fig = plt.figure()
ax = fig.gca(projection="3d")
ax.set_xlim3d(xlim)
ax.set_ylim3d(ylim)
ax.set_zlim3d(zlim)
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.set_zlabel("Z")
return fig, ax
def plot_reference_frame(ax, tf=None, arrow_length=0.2):
""" Plot xyz-axes on axes3d object
Parameters
----------
ax : mpl_toolkits.mplot3d.Axes3D
Axes object for 3D plotting.
tf : np.array of float
Transform to specify location of axes. Plots in origin if None.
arrow_length : float
The length of the axes plotted.
"""
l = arrow_length
x_axis = np.array([[0, l], [0, 0], [0, 0]])
y_axis = np.array([[0, 0], [0, l], [0, 0]])
z_axis = np.array([[0, 0], [0, 0], [0, l]])
if tf is not None:
# rotation
x_axis = np.dot(tf[:3, :3], x_axis)
y_axis = np.dot(tf[:3, :3], y_axis)
z_axis = np.dot(tf[:3, :3], z_axis)
# translation; [:, None] is the numpy way to change the shape (add an axis)
x_axis = x_axis + tf[:3, 3][:, None]
y_axis = y_axis + tf[:3, 3][:, None]
z_axis = z_axis + tf[:3, 3][:, None]
ax.plot(x_axis[0], x_axis[1], x_axis[2], "-", c="r")
ax.plot(y_axis[0], y_axis[1], y_axis[2], "-", c="g")
ax.plot(z_axis[0], z_axis[1], z_axis[2], "-", c="b")
```
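A minimal usage sketch of the two plotting helpers above:
```python
import numpy as np
import matplotlib.pyplot as plt
from acrolib.plotting import get_default_axes3d, plot_reference_frame

fig, ax = get_default_axes3d(xlim=[-0.5, 0.5], ylim=[-0.5, 0.5], zlim=[0, 1])

# World frame in the origin plus a second frame translated 0.5 m along z.
plot_reference_frame(ax)
tf = np.eye(4)
tf[2, 3] = 0.5
plot_reference_frame(ax, tf=tf, arrow_length=0.1)

plt.show()
```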
#### File: acrolib/tests/test_plotting.py
```python
import numpy as np
import matplotlib
import mpl_toolkits
from acrolib.plotting import get_default_axes3d, plot_reference_frame
def test_create_axes_3d():
fig, ax = get_default_axes3d()
assert isinstance(fig, matplotlib.pyplot.Figure)
assert isinstance(ax, mpl_toolkits.mplot3d.Axes3D)
def test_plot_reference_frame():
_, ax = get_default_axes3d()
plot_reference_frame(ax)
plot_reference_frame(ax, tf=np.eye(4))
plot_reference_frame(ax, tf=np.eye(4), arrow_length=0.3)
```
|
{
"source": "JeroenDM/moveit_constrained_planning_examples",
"score": 3
}
|
#### File: moveit_constrained_planning_examples/scripts/cart_planning_server.py
```python
from __future__ import print_function
import sys
import time
import rospy
import rospkg
import moveit_commander
import geometry_msgs.msg
import moveit_msgs.msg
import moveit_msgs.srv
GROUP_NAME = "panda_arm"
# GROUP_NAME = "manipulator"
class CartesianPlanningServer:
def __init__(self):
self.mc = moveit_commander.RobotCommander()
self.mg = moveit_commander.MoveGroupCommander(GROUP_NAME)
self.server = rospy.Service(
"cartesian_planning_server",
moveit_msgs.srv.GetMotionPlan,
lambda x: self.handle_request(x)
)
# default settings for Cartesian planning
self.eef_step = 0.01
self.jump_threshold = 0.0
def handle_request(self, request):
"""
Extract Cartesian waypoints from a MotionPlanRequest
and use computeCartesianPath to find a solution.
"""
rospy.loginfo("Received cartesian motion planning request.")
req = request.motion_plan_request
print(req)
resp = moveit_msgs.msg.MotionPlanResponse()
resp.group_name = req.group_name
assert(len(req.reference_trajectories) == 1)
assert(len(req.reference_trajectories[0].cartesian_trajectory) == 1)
cartesian_trajectory = req.reference_trajectories[0].cartesian_trajectory[0]
waypoints = []
for ctp in cartesian_trajectory.points:
waypoints.append(ctp.point.pose)
start_time = time.time()
(plan, fraction) = self.mg.compute_cartesian_path(
waypoints,
self.eef_step,
self.jump_threshold)
resp.planning_time = time.time() - start_time
if fraction == 1.0:
resp.error_code.val = moveit_msgs.msg.MoveItErrorCodes.SUCCESS
resp.trajectory = plan
else:
resp.error_code.val = moveit_msgs.msg.MoveItErrorCodes.GOAL_IN_COLLISION
return resp
if __name__ == '__main__':
rospy.init_node('cart_planning_server', anonymous=True)
server = CartesianPlanningServer()
print("Ready receive planning requests.")
rospy.spin()
```
#### File: moveit_constrained_planning_examples/scripts/load_work_object_case_3.py
```python
import sys
import copy
import rospy
import rospkg
import rosparam
import moveit_commander
import urdfpy
from geometry_msgs.msg import Vector3, Quaternion, Pose, PoseStamped
# The location of the urdf file inside the setup 1 support package
REL_WORK_PATH = "/urdf/work/"
# I moved the task a bit along the x-axis
# but this code was not flexible enough to change the position
# of the work object, so I hardcoded the x offset for now
X_OFFSET = 0.3
def numpy_to_pose(arr):
""" Numpy 4x4 array to geometry_msg.Pose
Code from: https://github.com/eric-wieser/ros_numpy
TODO move this to some utility module if I have one.
"""
from tf import transformations
assert arr.shape == (4, 4)
trans = transformations.translation_from_matrix(arr)
quat = transformations.quaternion_from_matrix(arr)
return Pose(position=Vector3(*trans), orientation=Quaternion(*quat))
def remove_all_objects(scene):
""" Given a planning scene, remove all known objects. """
for name in scene.get_known_object_names():
scene.remove_world_object(name)
def parse_urdf_file(package_name, work_name):
""" Convert urdf file (xml) to python dict.
Using the urdfpy package for now.
Using the xml package from the standard library could be
easier to understand. We can change this in the future
if it becomes a mess.
"""
rospack = rospkg.RosPack()
filepath = rospack.get_path(package_name)
filepath += REL_WORK_PATH
urdf = urdfpy.URDF.load(filepath + work_name + ".urdf")
d = {"links": {}, "joints": {}}
for link in urdf.links:
if link.name == "world" or link.name == "work":
continue
else:
d["links"][link.name] = parse_link(link, filepath)
for joint in urdf.joints:
p = PoseStamped()
p.header.frame_id = joint.parent
p.pose = numpy_to_pose(joint.origin)
d["joints"][joint.name] = {
"pose": p,
"parent": joint.parent,
"child": joint.child
}
return d
def parse_link(link, mesh_path):
""" Assume a link has only a single collision object.
Assume this collision object is a box or a mesh.
Assume the link named "world" has no collision objects.
link: a urdfpy.urdf.Link object
mesh_path: absolute path of the folder where we have to find the stl files
"""
assert len(link.collisions) == 1
assert link.name != "world"
assert link.name != "work"
collision = link.collisions[0]
if collision.geometry.box is not None:
data = {"type": "box", "size": link.collisions[0].geometry.box.size}
elif collision.geometry.mesh is not None:
data = {
"type": "mesh",
"filename": mesh_path + collision.geometry.mesh.filename,
"scale": collision.geometry.mesh.scale
}
else:
raise Exception("No mesh of box collision geometry found.")
return data
def publish_parsed_urdf(parsed_urdf, scene):
""" Publish link geometry for every joint's child.
TODO: there is an ugly hardcoded x offset for now.
"""
for name, joint in parsed_urdf["joints"].items():
# get the child link data
link = parsed_urdf["links"][joint["child"]]
pose_stamped = copy.deepcopy(joint["pose"])
pose_stamped.pose.position.x += X_OFFSET
# publish the child links collision geometry
if link["type"] == "box":
scene.add_box(
joint["child"],
pose_stamped,
link["size"]
)
else:
scene.add_mesh(
joint["child"],
pose_stamped,
link["filename"],
link["scale"]
)
if __name__ == "__main__":
rospy.init_node("publish_work")
scene = moveit_commander.PlanningSceneInterface()
rospy.sleep(1.0) # wait for the above things to setup
remove_all_objects(scene)
work = parse_urdf_file("setup_1_support", "kingpin")
publish_parsed_urdf(work, scene)
print("Done!")
```
|
{
"source": "JeroenDM/sampling_based_tube_following_2",
"score": 3
}
|
#### File: sampling_based_tube_following_2/case_2/create_data_figure_9.py
```python
import time
import numpy as np
from acrobotics.path.sampling import SamplingSetting, SampleMethod, SearchStrategy
from acrobotics.planning.types import CostFuntionType, SolveMethod, PlanningSetup
from acrobotics.planning.settings import SolveMethod, OptSettings, SolverSettings
from acrobotics.planning.solver import solve
from definition import create_robot, create_path, create_scene
# =============================================================================
# Define some utilities to run the experiment and process the results.
# ==============================================================================
def create_settings_grid(iters, use_constraints_cost, constraints_cost_weight=1.0):
s = SamplingSetting(
search_strategy=SearchStrategy.GRID,
iterations=iters,
tolerance_reduction_factor=2.0,
use_state_cost=use_constraints_cost,
state_cost_weight=constraints_cost_weight,
)
s2 = SolverSettings(SolveMethod.sampling_based, CostFuntionType.sum_squared, s)
return s2
def create_opt_settins(q_init, cow):
s2 = SolverSettings(
SolveMethod.optimization_based,
CostFuntionType.sum_squared,
opt_settings=OptSettings(
q_init=q_init, max_iters=500, con_objective_weight=cow
),
)
return s2
def calc_mean_deviation(rxyz):
"""Calculate the mean deviation on the x and y rotation
compared to the ideal value, which is zero since tolerance
is expressed in the local path frame."""
rx = rxyz[:, 0]
ry = rxyz[:, 1]
return np.sum(np.abs(rx) + np.abs(ry))
def JVM(sol):
"""
Calculate Joint Velocity Measure for a solution path.
"""
qp = np.array(sol.joint_positions)
return np.sum(np.diff(qp, axis=0) ** 2)
def Jcon(rxyz):
""" Cacluate the value of the objective related to the path constraints. """
return np.sum(rxyz[:, :2] ** 2)
def calc_tol_dev(robot, path, sol):
"""
Calculate deviation of the welding angles from the nominal path point pose.
"""
rxyz = np.zeros((N_PATH, 3))
for i, qi, pt in zip(range(N_PATH), sol.joint_positions, path):
tol_dev = pt.transform_to_rel_tolerance_deviation(robot.fk(qi))
rxyz[i] = tol_dev[3:]
return rxyz
# =============================================================================
# Run the simulations for the two planners, and different lambda values
# Warning: running the simulation can take a couple of minutes.
# ==============================================================================
N_PATH = 20
robot = create_robot()
scene, start, stop = create_scene(np.array([0.85, 0, 0]))
path = create_path(start, stop, N_PATH, 5, 5, 30)
setup = PlanningSetup(robot, path, scene)
lambda_values = [0.0, 1.0, 3.0, 10.0, 30.0, 100.0]
with open("case_2_sampling_based.csv", "a") as file:
file.write("lambda,mean_dev\n")
for w in lambda_values:
s = create_settings_grid(1, True, w)
sol = solve(setup, s)
rxyz = calc_tol_dev(robot, path, sol)
mean_dev = calc_mean_deviation(rxyz)
file.write(f"{w},{mean_dev}\n")
# Use the home position as an initial guess for the next algorithm
# in general it is not always trivial to find a good intial guess
q_home = np.array([0, 1.5, 0, 0, 0, 0])
q_init = np.ones((N_PATH, 6)) * q_home
# I used a slightly different position for the planning scene by accident
# This does not influence the results much as we focus on the influence of
# lambda here.
scene, start, stop = create_scene(np.array([0.8, 0, 0]))
path = create_path(start, stop, N_PATH, 5, 5, 30)
setup = PlanningSetup(robot, path, scene)
with open("case_2_optimization_based.csv", "a") as file:
file.write("cost,time,mean_dev,lambda,success\n")
for w in lambda_values:
start = time.time()
try:
sol2 = solve(setup, create_opt_settings(q_init, w))
stop = time.time()
rxyz = calc_tol_dev(robot, path, sol2)
mean_dev = calc_mean_deviation(rxyz)
file.write(f"{sol2.path_cost},{stop - start},{mean_dev},{w},1\n")
except Exception:
stop = time.time()
file.write(f"{np.nan},{stop - start},{np.nan},{w},0\n")
```
|
{
"source": "JeroenDM/urdf_to_opw_kinematics",
"score": 3
}
|
#### File: src/urdf_to_opw_kinematics/main.py
```python
import numpy as np
from numpy.linalg import norm
from urdf_to_opw_kinematics.util import angle, Axis, distance, rot_y
DEBUG = True
def check_compatibility(robot):
""" TODO add compatibility tests
now I just check if there are 6 revolute joints
"""
axes = get_joint_axes_from_urdf(robot)
num_joints = len(axes)
if num_joints != 6:
print(robot.name + " has " + str(num_joints) + " joints, not 6.")
return False
return True
def convert(robot):
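""" Extract the OPW kinematics parameters from a urdf_parser_py robot model
Returns
-------
dict with the distances c1, c2, c3, c4, the signed distances a1, a2, b,
and the lists 'joint_offsets' and 'sign_corrections'
"""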
axes = get_joint_axes_from_urdf(robot)
tool0_position = get_tool0_position(robot, axes)
jo = get_joint_offsets(axes)
sc = get_sign_corrections(axes)
params = get_dimensions(axes, tool0_position, jo)
params['joint_offsets'] = jo
params['sign_corrections'] = sc
return params
def get_joint_axes_from_urdf(robot):
""" Extract joint origin and axis direction from urdf
Save absolute position in base_link, and relative position
with respect to the previous link.
Parameters
----------
robot object from the urdf_parser_py library
Returns
-------
list of Axis objects
"""
joints = robot.joints
axes = []
for i in range(len(joints)):
if joints[i].type == "revolute":
if i > 0:
p_relative = np.array(joints[i].origin.xyz)
p_previous = axes[-1].position
axes.append(Axis(p_previous + p_relative,
p_relative, np.array(joints[i].axis)))
else:
axes.append(Axis(np.array(joints[i].origin.xyz), np.array(
joints[i].origin.xyz), np.array(joints[i].axis)))
return axes
def get_tool0_position(robot, axes):
""" Search for the tool0 link and get absolut position origin
Returns
-------
absolut position as a numpy array of length 3
"""
for joint in robot.joints:
if joint.child == "tool0":
return axes[-1].position + np.array(joint.origin.xyz)
raise ValueError("Failed to find a joint with child link 'tool0'.")
def get_joint_offsets(axes):
""" Calculate joint angle difference between reference pose of opw_kinematics
and the zero pose of the current robot
"""
G1 = axes[0]
G2 = axes[1]
G3 = axes[2]
G4 = axes[3]
G5 = axes[4]
G6 = axes[5]
unit_x = np.array([1.0, 0, 0])
unit_y = np.array([0, 1.0, 0])
unit_z = np.array([0, 0, 1.0])
jo1 = angle(unit_y, G2.direction)
v23 = distance(G2, G3, return_vector=True)
jo2 = angle(unit_z, v23)
#g4_positive = np.array([abs(e) for e in axes[3].direction])
jo3 = angle(unit_z, G4.direction) - jo2
jo4 = angle(unit_y, G5.direction) - jo1
jo5 = angle(G4.direction, G6.direction)
# TODO get ee_y as input and correct for jo1 and jo4
ee_y_direction = unit_y
jo6 = angle(ee_y_direction, unit_y)
return [-jo1, -jo2, -jo3, -jo4, -jo5, -jo6]
def get_sign_corrections(axes):
""" Does the axis rotate according to the right hand rule?
Assume all z-axis pointed up and axis along one of the main axes
"""
sc = map(np.sum, [a.direction for a in axes])
return [int(val) for val in sc]
def get_dimensions(axes, tool0_position, jo):
""" Calculate distance parameters c1, c2, c3, c4
and signed distances a1, a2, b
Note
----
The sign of b is not yet implemented and defaults to positive
"""
params = {}
G1 = axes[0]
G2 = axes[1]
G3 = axes[2]
G4 = axes[3]
G5 = axes[4]
G6 = axes[5]
p_ee = tool0_position
unit_x = np.array([1.0, 0, 0])
# TODO use joint offset on first joint to make this more general
# check if a1 is along positive x and g2 is above x-y plane
# this means that the position of g2 should be (a1, 0, c1) with a1 > 0
P2 = G2.position
if (P2[0] >= 0 and P2[1] == 0 and P2[2] >= 0):
params['a1'] = P2[0]
params['c1'] = P2[2]
else:
raise ValueError("Wrong orientations of g2.")
# ci's are always positive
params['c2'] = distance(G2, G3)
params['c3'] = distance(G3, G5, along=G4)
# distance between g5 and tool0 along g6
params['c4'] = np.abs(np.dot(G6.direction, p_ee - G5.position))
# calculate sign a2
v34 = distance(G3, G4, return_vector=True)
v34 = np.dot(rot_y(jo[1] + jo[2]), v34)
a2_sign = np.sign(np.dot(unit_x, v34))
params['a2'] = a2_sign * distance(G3, G4)
# TODO sign calculation
# but b is zero in most robots
params['b'] = distance(G3, G4, along=G3)
return params
def get_dimensions_new(axes):
""" (DOES NOT WORK)
Alternative method that could work if we make a lot more assumptions about
the given urdf model.
"""
params = {}
P_0_1 = axes[0].p_rel
P_1_2 = axes[1].p_rel
P_2_3 = axes[2].p_rel
P_3_4 = axes[3].p_rel
P_4_5 = axes[4].p_rel
P_5_6 = axes[5].p_rel
params['c1'] = P_0_1[2] + P_1_2[2]
params['c2'] = norm(P_2_3)
params['c3'] = norm(P_4_5) + P_3_4[0]
params['c4'] = norm(P_5_6)
params['a1'] = np.sqrt(P_1_2[0]**2 + P_1_2[1]**2)
params['a2'] = -np.sqrt(P_3_4[0]**2 + P_3_4[2]**2) # or -P_3_4[2]
params['b'] = P_3_4[1]
return params
```
|
{
"source": "jeroendoggen/blackboard-analysis-tools",
"score": 3
}
|
#### File: blackboard-analysis-tools/blackboard_analysis_tools/logger.py
```python
"""
Blackboard Analysis Tools
Copyright 2013, <NAME>, <EMAIL>
"""
from __future__ import print_function, division # We require Python 2.6+
import logging
import sys
class Logger():
""" Logging class """
logger = 0
def __init__(self, logfile):
self.set_logfile(logfile)
self.info("Starting 'analysis tool': ")
def set_logfile(self, logfile):
"""Set the logfile: for error & info messages"""
try:
self.logfile = logfile
logging.basicConfig(filename=self.logfile,
level=logging.DEBUG,
format="%(asctime)s %(name)s %(levelname)s %(message)s")
self.logger = logging.getLogger(__name__)
except IOError:
self.exit_program("opening the logfile (do you have write permission?)")
def exit_program(self, message):
""" Exit the program with a message
TODO: this should move somewhere else (needed in multiple places)
"""
print("Error while " + message)
print("Closing application")
sys.exit()
def info(self, message):
self.logger.info(message)
```
|
{
"source": "jeroenh/coldsweat",
"score": 2
}
|
#### File: coldsweat/coldsweat/config.py
```python
import os
from ConfigParser import SafeConfigParser
from utilities import Struct
__all__ = [
'load_config',
]
DEFAULTS = {
'min_interval' : '900',
'max_errors' : '50',
'max_history' : '7',
'timeout' : '10',
'processes' : '4',
'level' : 'INFO',
'filename' : '', # Don't log
'static_url' : '',
'load' : ''
}
def load_config(config_path):
'''
Load up configuration settings
'''
parser = SafeConfigParser(DEFAULTS)
converters = {
'min_interval' : parser.getint,
'max_errors' : parser.getint,
'max_history' : parser.getint,
'timeout' : parser.getint,
'processes' : parser.getint,
}
if os.path.exists(config_path):
parser.read(config_path)
else:
raise RuntimeError('Could not find configuration file %s' % config_path)
config = Struct()
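# Each INI section becomes a Struct attribute; keys listed in `converters`
# are parsed as integers, all other values are kept as strings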
for section in parser.sections():
d = { k :
converters[k](section, k) if k in converters else v
for k, v in parser.items(section)
}
config[section] = Struct(d)
return config
```
#### File: coldsweat/coldsweat/controllers.py
```python
import sys, os, re, time, urlparse
from datetime import datetime
from xml.etree import ElementTree
from peewee import JOIN_LEFT_OUTER, fn, IntegrityError
import feedparser
import requests
from requests.exceptions import *
from webob.exc import *
from models import *
from utilities import *
from plugins import trigger_event, load_plugins
from filters import escape_html, status_title
from coldsweat import *
from fetcher import *
class UserController(object):
'''
Base user controller class. Derived classes may need to override the user property
'''
@property
def user(self):
return self._current_user
@user.setter
def user(self, user):
self._current_user = user
def add_subscription(self, feed, group):
'''
Associate a feed/group pair to current user
'''
try:
subscription = Subscription.create(user=self.user, feed=feed, group=group)
except IntegrityError:
logger.debug(u'user %s already has feed %s in her subscriptions' % (self.user.username, feed.self_link))
return None
logger.debug(u'subscribed user %s to feed %s' % (self.user.username, feed.self_link))
return subscription
def remove_subscription(self, feed):
'''
Remove a feed subscription for current user
'''
Subscription.delete().where((Subscription.user == self.user) & (Subscription.feed == feed)).execute()
# ------------------------------------------------------
# Queries
# ------------------------------------------------------
# Entries
def mark_entry(self, entry, status):
'''
Mark an entry as read|unread|saved|unsaved for current user
'''
if status == 'read':
try:
Read.create(user=self.user, entry=entry)
except IntegrityError:
logger.debug(u'entry %s already marked as read, ignored' % entry.id)
return
elif status == 'unread':
count = Read.delete().where((Read.user==self.user) & (Read.entry==entry)).execute()
if not count:
logger.debug(u'entry %s never marked as read, ignored' % entry.id)
return
elif status == 'saved':
try:
Saved.create(user=self.user, entry=entry)
except IntegrityError:
logger.debug(u'entry %s already saved, ignored' % entry.id)
return
elif status == 'unsaved':
count = Saved.delete().where((Saved.user==self.user) & (Saved.entry==entry)).execute()
if not count:
logger.debug(u'entry %s never saved, ignored' % entry.id)
return
logger.debug(u'entry %s %s' % (entry.id, status))
def get_unread_entries(self, *select):
#@@TODO: include saved information too
q = _q(*select).where((Subscription.user == self.user) &
~(Entry.id << Read.select(Read.entry).where(Read.user == self.user))).distinct()
return q
def get_saved_entries(self, *select):
#@@TODO: include read information too
q = _q(*select).where((Subscription.user == self.user) &
(Entry.id << Saved.select(Saved.entry).where(Saved.user == self.user))).distinct()
return q
def get_all_entries(self, *select):
#@@TODO: include read and saved information too
q = _q(*select).where(Subscription.user == self.user).distinct()
return q
def get_group_entries(self, group, *select):
#@@TODO: include read and saved information too
q = _q(*select).where((Subscription.user == self.user) & (Subscription.group == group))
return q
def get_feed_entries(self, feed, *select):
#@@TODO: include read and saved information too
q = _q(*select).where((Subscription.user == self.user) & (Subscription.feed == feed)).distinct()
return q
# Feeds
def get_feeds(self, *select):
select = select or [Feed, fn.Count(Entry.id).alias('entry_count')]
q = Feed.select(*select).join(Entry, JOIN_LEFT_OUTER).switch(Feed).join(Subscription).where(Subscription.user == self.user).group_by(Feed)
return q
def get_group_feeds(self, group):
q = Feed.select().join(Subscription).where((Subscription.user == self.user) & (Subscription.group == group))
return q
# Groups
def get_groups(self):
q = Group.select().join(Subscription).where(Subscription.user == self.user).distinct().order_by(Group.title)
return q
# Shortcut
def _q(*select):
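'''
Base query joining Entry -> Feed -> Subscription; callers narrow it
down with user, group or feed conditions
'''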
select = select or (Entry, Feed)
q = Entry.select(*select).join(Feed).join(Subscription)
return q
class FeedController(object):
'''
Feed controller class
'''
def add_feed_from_url(self, self_link, fetch_data=False):
'''
Save a new feed object to database via its URL
'''
feed = Feed(self_link=self_link)
return self.add_feed(feed, fetch_data)
def add_feed(self, feed, fetch_data=False):
'''
Save a new feed object to database
'''
feed.self_link = scrub_url(feed.self_link)
try:
previous_feed = Feed.get(self_link_hash=make_sha1_hash(feed.self_link))
logger.debug(u'feed %s has already been added to database, skipped' % feed.self_link)
return previous_feed
except Feed.DoesNotExist:
pass
feed.save()
if fetch_data:
self.fetch_feeds([feed])
return feed
# #@@TODO: delete feed if there are no subscribers
# def remove_feed(self, feed):
# pass
def add_feeds_from_file(self, filename, fetch_data=False):
"""
Add feeds to database reading from a file containing OPML data.
"""
# Map OPML attr keys to Feed model
feed_allowed_attribs = {
'xmlUrl': 'self_link',
'htmlUrl': 'alternate_link',
'title': 'title',
'text': 'title', # Alias for title
}
# Map OPML attr keys to Group model
group_allowed_attribs = {
'title': 'title',
'text': 'title', # Alias for title
}
default_group = Group.get(Group.title == Group.DEFAULT_GROUP)
feeds = []
groups = [default_group]
for event, element in ElementTree.iterparse(filename, events=('start','end')):
if event == 'start':
if (element.tag == 'outline') and ('xmlUrl' not in element.attrib):
# Entering a group
group = Group()
for k, v in element.attrib.items():
if k in group_allowed_attribs:
setattr(group, group_allowed_attribs[k], v)
try:
group = Group.get(Group.title==group.title)
except Group.DoesNotExist:
group.save()
logger.debug(u'added group %s to database' % group.title)
groups.append(group)
elif event == 'end':
if (element.tag == 'outline') and ('xmlUrl' in element.attrib):
# Leaving a feed
feed = Feed()
for k, v in element.attrib.items():
if k in feed_allowed_attribs:
setattr(feed, feed_allowed_attribs[k], v)
feed = self.add_feed(feed, fetch_data)
feeds.append((feed, groups[-1]))
elif element.tag == 'outline':
# Leaving a group
groups.pop()
return feeds
# ------------------------------------------------------
# Fetching
# ------------------------------------------------------
def fetch_feeds(self, feeds):
"""
Fetch given feeds, possibly parallelizing requests
"""
start = time.time()
load_plugins()
logger.debug(u"starting fetcher")
trigger_event('fetch_started')
if config.fetcher.processes:
from multiprocessing import Pool
# Each worker has its own connection
p = Pool(config.fetcher.processes, initializer=connect)
p.map(feed_worker, feeds)
# Exit the worker processes so their connections do not leak
p.close()
else:
# Just sequence requests in this process
for feed in feeds:
feed_worker(feed)
trigger_event('fetch_done', feeds)
logger.info(u"%d feeds checked in %.2fs" % (len(feeds), time.time() - start))
def fetch_all_feeds(self):
"""
Fetch all enabled feeds, possibly parallelizing requests
"""
q = Feed.select().where(Feed.is_enabled==True)
feeds = list(q)
if not feeds:
logger.debug(u"no feeds found to fetch, halted")
return
self.fetch_feeds(feeds)
def feed_worker(feed):
fetcher = Fetcher(feed)
fetcher.update_feed()
```
#### File: coldsweat/coldsweat/fetcher.py
```python
import sys, os, re, time, urlparse
from datetime import datetime
from peewee import IntegrityError
import feedparser
import requests
from requests.exceptions import *
from webob.exc import *
from coldsweat import *
from plugins import trigger_event
from models import *
from utilities import *
from translators import *
import markup
import filters
__all__ = [
'Fetcher',
'fetch_url'
]
FETCH_ICONS_DELTA = 30 # Days
class Fetcher(object):
'''
Fetch a single given feed
'''
def __init__(self, feed):
# Save timestamp for current fetch operation
self.instant = datetime.utcnow()
# Extract netloc
_, self.netloc, _, _, _ = urlparse.urlsplit(feed.self_link)
self.feed = feed
def handle_500(self, response):
'''
Internal server error
'''
self.feed.error_count += 1
self.feed.last_status = response.status_code
logger.warn(u"%s has caused an error on server, skipped" % self.netloc)
raise HTTPInternalServerError
def handle_403(self, response):
'''
Forbidden
'''
self.feed.error_count += 1
self.feed.last_status = response.status_code
logger.warn(u"%s access was denied, skipped" % self.netloc)
raise HTTPForbidden
def handle_404(self, response):
'''
Not Found
'''
self.feed.error_count += 1
self.feed.last_status = response.status_code
logger.warn(u"%s has been not found, skipped" % self.netloc)
raise HTTPNotFound
def handle_410(self, response):
'''
Gone
'''
self.feed.is_enabled = False
self.feed.error_count += 1
self.feed.last_status = response.status_code
logger.warn(u"%s is gone, disabled" % self.netloc)
self._synthesize_entry('Feed has been removed from the origin server.')
raise HTTPGone
def handle_304(self, response):
'''
Not modified
'''
logger.debug(u"%s hasn't been modified, skipped" % self.netloc)
self.feed.last_status = response.status_code
raise HTTPNotModified
def handle_301(self, response):
'''
Moved permanently
'''
self_link = response.url
try:
Feed.get(self_link=self_link)
except Feed.DoesNotExist:
self.feed.self_link = self_link
self.feed.last_status = response.status_code
logger.info(u"%s has changed its location, updated to %s" % (self.netloc, self_link))
else:
self.feed.is_enabled = False
self.feed.last_status = DuplicatedFeedError.code
self.feed.error_count += 1
self._synthesize_entry('Feed has a duplicated web address.')
logger.warn(u"new %s location %s is duplicated, disabled" % (self.netloc, self_link))
raise DuplicatedFeedError
def handle_200(self, response):
'''
OK plus redirects
'''
self.feed.etag = response.headers.get('ETag', None)
# Save final status code discarding redirects
self.feed.last_status = response.status_code
handle_307 = handle_200 # Alias
handle_302 = handle_200 # Alias
def update_feed(self):
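'''
Fetch the feed issuing a conditional GET, dispatch on the returned
status code, then parse new entries and refresh the favicon
'''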
logger.debug(u"updating %s" % self.netloc)
# Check freshness
for value in [self.feed.last_checked_on, self.feed.last_updated_on]:
if not value:
continue
# No datetime.timedelta since we need to
# deal with large seconds values
delta = datetime_as_epoch(self.instant) - datetime_as_epoch(value)
if delta < config.fetcher.min_interval:
logger.debug(u"%s is below minimun fetch interval, skipped" % self.netloc)
return
try:
response = fetch_url(self.feed.self_link,
timeout=config.fetcher.timeout,
etag=self.feed.etag,
modified_since=self.feed.last_updated_on)
except RequestException:
# Record any network error as 'Service Unavailable'
self.feed.last_status = HTTPServiceUnavailable.code
self.feed.error_count += 1
logger.warn(u"a network error occured while fetching %s, skipped" % self.netloc)
self.check_feed_health()
self.feed.save()
return
self.feed.last_checked_on = self.instant
# Check if we got a redirect first
if response.history:
status = response.history[0].status_code
else:
status = response.status_code
try:
handler = getattr(self, 'handle_%d' % status, None)
if handler:
logger.debug(u"got status %s from server" % status)
handler(response)
else:
self.feed.last_status = status
logger.warn(u"%s replied with unhandled status %d, aborted" % (self.netloc, status))
return
self._parse_feed(response.text)
self._fetch_icon()
except HTTPNotModified:
pass # Nothing to do
except (HTTPError, DuplicatedFeedError):
self.check_feed_health()
finally:
self.feed.save()
def check_feed_health(self):
if config.fetcher.max_errors and self.feed.error_count > config.fetcher.max_errors:
self._synthesize_entry('Feed has accumulated too many errors (last was %s).' % filters.status_title(self.feed.last_status))
logger.warn(u"%s has accomulated too many errors, disabled" % self.netloc)
self.feed.is_enabled = False
return
def update_feed_with_data(self, data):
self._parse_feed(data)
self.feed.save()
def _parse_feed(self, data):
soup = feedparser.parse(data)
# Got parsing error?
if hasattr(soup, 'bozo') and soup.bozo:
logger.debug(u"%s caused a parser error (%s), tried to parse it anyway" % (self.netloc, soup.bozo_exception))
ft = FeedTranslator(soup.feed)
self.feed.last_updated_on = ft.get_timestamp(self.instant)
self.feed.alternate_link = ft.get_alternate_link()
self.feed.title = self.feed.title or ft.get_title() # Do not set again if already set
#entries = []
feed_author = ft.get_author()
for entry_dict in soup.entries:
t = EntryTranslator(entry_dict)
link = t.get_link()
guid = t.get_guid(default=link)
if not guid:
logger.warn(u'could not find GUID for entry from %s, skipped' % self.netloc)
continue
timestamp = t.get_timestamp(self.instant)
content_type, content = t.get_content(('text/plain', ''))
# Skip ancient entries
if config.fetcher.max_history and (self.instant - timestamp).days > config.fetcher.max_history:
logger.debug(u"entry %s from %s is over maximum history, skipped" % (guid, self.netloc))
continue
try:
# If entry is already in database with same hashed GUID, skip it
Entry.get(guid_hash=make_sha1_hash(guid))
logger.debug(u"duplicated entry %s, skipped" % guid)
continue
except Entry.DoesNotExist:
pass
entry = Entry(
feed = self.feed,
guid = guid,
link = link,
title = t.get_title(default='Untitled'),
author = t.get_author() or feed_author,
content = content,
content_type = content_type,
last_updated_on = timestamp
)
# At this point we are pretty sure we don't have the entry
# already in the database so alert plugins and save data
trigger_event('entry_parsed', entry, entry_dict)
entry.save()
#@@TODO: entries.append(entry)
logger.debug(u"parsed entry %s from %s" % (guid, self.netloc))
#return entries
def _fetch_icon(self):
if not self.feed.icon or not self.feed.icon_last_updated_on or (self.instant - self.feed.icon_last_updated_on).days > FETCH_ICONS_DELTA:
# Prefer alternate_link if available since self_link could
# point to Feed Burner or similar services
self.feed.icon = self._google_favicon_fetcher(self.feed.alternate_link or self.feed.self_link)
self.feed.icon_last_updated_on = self.instant
logger.debug(u"fetched favicon %s..." % (self.feed.icon[:70]))
def _google_favicon_fetcher(self, url):
'''
Fetch a site favicon via Google service
'''
endpoint = "http://www.google.com/s2/favicons?domain=%s" % urlparse.urlsplit(url).hostname
try:
response = fetch_url(endpoint)
except RequestException, exc:
logger.warn(u"could not fetch favicon for %s (%s)" % (url, exc))
return Feed.DEFAULT_ICON
return make_data_uri(response.headers['Content-Type'], response.content)
def add_synthesized_entry(self, title, content_type, content):
'''
Create an HTML entry for this feed
'''
# Since we don't know the mechanism the feed used to build a GUID for its entries
# synthesize a tag URI from the link and a random string. This makes
# entries internally generated by Coldsweat reasonably globally unique
guid = ENTRY_TAG_URI % make_sha1_hash(self.feed.self_link + make_nonce())
entry = Entry(
feed = self.feed,
guid = guid,
title = title,
author = 'Coldsweat',
content = content,
content_type = content_type,
last_updated_on = self.instant
)
entry.save()
logger.debug(u"synthesized entry %s" % guid)
return entry
def _synthesize_entry(self, reason):
title = u'This feed has been disabled'
content = render_template(os.path.join(template_dir, '_entry_feed_disabled.html'), {'reason': reason})
return self.add_synthesized_entry(title, 'text/html', content)
def fetch_url(url, timeout=10, etag=None, modified_since=None):
'''
Fetch a given URL, optionally issuing a 'Conditional GET' request
'''
request_headers = {
'User-Agent': USER_AGENT
}
# Conditional GET headers
if etag and modified_since:
logger.debug(u"fetching %s with a conditional GET (%s %s)" % (url, etag, format_http_datetime(modified_since)))
request_headers['If-None-Match'] = etag
request_headers['If-Modified-Since'] = format_http_datetime(modified_since)
try:
response = requests.get(url, timeout=timeout, headers=request_headers)
except RequestException, exc:
logger.debug(u"tried to fetch %s but got %s" % (url, exc.__class__.__name__))
raise exc
return response
# ------------------------------------------------------
# Custom error codes 9xx & exceptions
# ------------------------------------------------------
class DuplicatedFeedError(Exception):
code = 900
title = 'Duplicated feed'
explanation = 'Feed address matches another already present in the database.'
# Update WebOb status codes map
for klass in (DuplicatedFeedError,):
status_map[klass.code] = klass
```
#### File: coldsweat/tests/strip.py
```python
from ..markup import strip_html
def run_tests():
tests = [
('a', 'a'), # Identity
('a <p class="c"><span>b</span></p> a', 'a b a'),
(u'à <p class="c"><span>b</span></p> à', u'à b à'), # Unicode
('a&a<a>', 'a&a<a>'), # Test unescape of entity and char reference too
('<span>a</span>', 'a'),
('<span>a', 'a'), # Unclosed elements
('<p><span>a</p>', 'a'),
('<foo attr=1><bar />a</foo>', 'a'), # Non HTML tags
]
for value, wanted in tests:
assert strip_html(value) == wanted
if __name__ == "__main__":
run_tests()
```
|
{
"source": "jeroenj/youtube-dl",
"score": 2
}
|
#### File: youtube-dl/test/test_youtube_signature.py
```python
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import io
import re
import string
from test.helper import FakeYDL
from youtube_dl.extractor import YoutubeIE
from youtube_dl.jsinterp import JSInterpreter
from youtube_dl.compat import compat_str, compat_urlretrieve
_SIG_TESTS = [
(
'https://s.ytimg.com/yts/jsbin/html5player-vflHOr_nV.js',
86,
'>=<;:/.-[+*)(\'&%$#"!ZYX0VUTSRQPONMLKJIHGFEDCBA\\yxwvutsrqponmlkjihgfedcba987654321',
),
(
'https://s.ytimg.com/yts/jsbin/html5player-vfldJ8xgI.js',
85,
'3456789a0cdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRS[UVWXYZ!"#$%&\'()*+,-./:;<=>?@',
),
(
'https://s.ytimg.com/yts/jsbin/html5player-vfle-mVwz.js',
90,
']\\[@?>=<;:/.-,+*)(\'&%$#"hZYXWVUTSRQPONMLKJIHGFEDCBAzyxwvutsrqponmlkjiagfedcb39876',
),
(
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vfl0Cbn9e.js',
84,
'O1I3456789abcde0ghijklmnopqrstuvwxyzABCDEFGHfJKLMN2PQRSTUVW@YZ!"#$%&\'()*+,-./:;<=',
),
(
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflXGBaUN.js',
'2ACFC7A61CA478CD21425E5A57EBD73DDC78E22A.2094302436B2D377D14A3BBA23022D023B8BC25AA',
'A52CB8B320D22032ABB3A41D773D2B6342034902.A22E87CDD37DBE75A5E52412DC874AC16A7CFCA2',
),
(
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflBb0OQx.js',
84,
'123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQ0STUVWXYZ!"#$%&\'()*+,@./:;<=>'
),
(
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vfl9FYC6l.js',
83,
'123456789abcdefghijklmnopqr0tuvwxyzABCDETGHIJKLMNOPQRS>UVWXYZ!"#$%&\'()*+,-./:;<=F'
),
(
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflCGk6yw/html5player.js',
'4646B5181C6C3020DF1D9C7FCFEA.AD80ABF70C39BD369CCCAE780AFBB98FA6B6CB42766249D9488C288',
'82C8849D94266724DC6B6AF89BBFA087EACCD963.B93C07FBA084ACAEFCF7C9D1FD0203C6C1815B6B'
),
(
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflKjOTVq/html5player.js',
'312AA52209E3623129A412D56A40F11CB0AF14AE.3EE09501CB14E3BCDC3B2AE808BF3F1D14E7FBF12',
'112AA5220913623229A412D56A40F11CB0AF14AE.3EE0950FCB14EEBCDC3B2AE808BF331D14E7FBF3',
)
]
_NSIG_TESTS = [
(
'https://www.youtube.com/s/player/9216d1f7/player_ias.vflset/en_US/base.js',
'SLp9F5bwjAdhE9F-', 'gWnb9IK2DJ8Q1w',
),
(
'https://www.youtube.com/s/player/f8cb7a3b/player_ias.vflset/en_US/base.js',
'oBo2h5euWy6osrUt', 'ivXHpm7qJjJN',
),
(
'https://www.youtube.com/s/player/2dfe380c/player_ias.vflset/en_US/base.js',
'oBo2h5euWy6osrUt', '3DIBbn3qdQ',
),
(
'https://www.youtube.com/s/player/f1ca6900/player_ias.vflset/en_US/base.js',
'cu3wyu6LQn2hse', 'jvxetvmlI9AN9Q',
),
(
'https://www.youtube.com/s/player/8040e515/player_ias.vflset/en_US/base.js',
'wvOFaY-yjgDuIEg5', 'HkfBFDHmgw4rsw',
),
(
'https://www.youtube.com/s/player/e06dea74/player_ias.vflset/en_US/base.js',
'AiuodmaDDYw8d3y4bf', 'ankd8eza2T6Qmw',
),
]
class TestPlayerInfo(unittest.TestCase):
def test_youtube_extract_player_info(self):
PLAYER_URLS = (
('https://www.youtube.com/s/player/64dddad9/player_ias.vflset/en_US/base.js', '64dddad9'),
('https://www.youtube.com/s/player/64dddad9/player_ias.vflset/fr_FR/base.js', '64dddad9'),
('https://www.youtube.com/s/player/64dddad9/player-plasma-ias-phone-en_US.vflset/base.js', '64dddad9'),
('https://www.youtube.com/s/player/64dddad9/player-plasma-ias-phone-de_DE.vflset/base.js', '64dddad9'),
('https://www.youtube.com/s/player/64dddad9/player-plasma-ias-tablet-en_US.vflset/base.js', '64dddad9'),
# obsolete
('https://www.youtube.com/yts/jsbin/player_ias-vfle4-e03/en_US/base.js', 'vfle4-e03'),
('https://www.youtube.com/yts/jsbin/player_ias-vfl49f_g4/en_US/base.js', 'vfl49f_g4'),
('https://www.youtube.com/yts/jsbin/player_ias-vflCPQUIL/en_US/base.js', 'vflCPQUIL'),
('https://www.youtube.com/yts/jsbin/player-vflzQZbt7/en_US/base.js', 'vflzQZbt7'),
('https://www.youtube.com/yts/jsbin/player-en_US-vflaxXRn1/base.js', 'vflaxXRn1'),
('https://s.ytimg.com/yts/jsbin/html5player-en_US-vflXGBaUN.js', 'vflXGBaUN'),
('https://s.ytimg.com/yts/jsbin/html5player-en_US-vflKjOTVq/html5player.js', 'vflKjOTVq'),
)
for player_url, expected_player_id in PLAYER_URLS:
player_id = YoutubeIE._extract_player_info(player_url)
self.assertEqual(player_id, expected_player_id)
class TestSignature(unittest.TestCase):
def setUp(self):
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
self.TESTDATA_DIR = os.path.join(TEST_DIR, 'testdata/sigs')
if not os.path.exists(self.TESTDATA_DIR):
os.mkdir(self.TESTDATA_DIR)
def tearDown(self):
try:
for f in os.listdir(self.TESTDATA_DIR):
os.remove(os.path.join(self.TESTDATA_DIR, f))
except OSError:
pass
def t_factory(name, sig_func, url_pattern):
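# Build a parametrized test generator: for every (player URL, input, expected)
# tuple it downloads and caches the player JS, runs sig_func on it and attaches
# the resulting test method to TestSignature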
def make_tfunc(url, sig_input, expected_sig):
m = url_pattern.match(url)
assert m, '%r should follow URL format' % url
test_id = m.group('id')
def test_func(self):
basename = 'player-{0}-{1}.js'.format(name, test_id)
fn = os.path.join(self.TESTDATA_DIR, basename)
if not os.path.exists(fn):
compat_urlretrieve(url, fn)
with io.open(fn, encoding='utf-8') as testf:
jscode = testf.read()
self.assertEqual(sig_func(jscode, sig_input), expected_sig)
test_func.__name__ = str('test_{0}_js_{1}'.format(name, test_id))
setattr(TestSignature, test_func.__name__, test_func)
return make_tfunc
def signature(jscode, sig_input):
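# An integer input means "scramble the first N printable characters",
# otherwise the input is taken as the raw signature string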
func = YoutubeIE(FakeYDL())._parse_sig_js(jscode)
src_sig = (
compat_str(string.printable[:sig_input])
if isinstance(sig_input, int) else sig_input)
return func(src_sig)
def n_sig(jscode, sig_input):
funcname = YoutubeIE(FakeYDL())._extract_n_function_name(jscode)
return JSInterpreter(jscode).call_function(funcname, sig_input)
make_sig_test = t_factory(
'signature', signature, re.compile(r'.*-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player)?\.[a-z]+$'))
for test_spec in _SIG_TESTS:
make_sig_test(*test_spec)
make_nsig_test = t_factory(
'nsig', n_sig, re.compile(r'.+/player/(?P<id>[a-zA-Z0-9_-]+)/.+.js$'))
for test_spec in _NSIG_TESTS:
make_nsig_test(*test_spec)
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jeroenj/yt-dlp",
"score": 2
}
|
#### File: yt-dlp/yt_dlp/compat.py
```python
import asyncio
import base64
import collections
import ctypes
import getpass
import html
import html.parser
import http
import http.client
import http.cookiejar
import http.cookies
import http.server
import itertools
import optparse
import os
import re
import shlex
import shutil
import socket
import struct
import subprocess
import sys
import tokenize
import urllib
import xml.etree.ElementTree as etree
from subprocess import DEVNULL
# HTMLParseError has been deprecated in Python 3.3 and removed in
# Python 3.5. Introduce a dummy exception for Python >3.5 to keep
# cross-version exception handling compatible and uniform
class compat_HTMLParseError(Exception):
pass
# compat_ctypes_WINFUNCTYPE = ctypes.WINFUNCTYPE
# will not work since ctypes.WINFUNCTYPE does not exist in UNIX machines
def compat_ctypes_WINFUNCTYPE(*args, **kwargs):
return ctypes.WINFUNCTYPE(*args, **kwargs)
class _TreeBuilder(etree.TreeBuilder):
def doctype(self, name, pubid, system):
pass
def compat_etree_fromstring(text):
return etree.XML(text, parser=etree.XMLParser(target=_TreeBuilder()))
compat_os_name = os._name if os.name == 'java' else os.name
if compat_os_name == 'nt':
def compat_shlex_quote(s):
return s if re.match(r'^[-_\w./]+$', s) else '"%s"' % s.replace('"', '\\"')
else:
from shlex import quote as compat_shlex_quote
def compat_ord(c):
if type(c) is int:
return c
else:
return ord(c)
def compat_setenv(key, value, env=os.environ):
env[key] = value
if compat_os_name == 'nt' and sys.version_info < (3, 8):
# os.path.realpath on Windows does not follow symbolic links
# prior to Python 3.8 (see https://bugs.python.org/issue9949)
def compat_realpath(path):
while os.path.islink(path):
path = os.path.abspath(os.readlink(path))
return path
else:
compat_realpath = os.path.realpath
def compat_print(s):
assert isinstance(s, compat_str)
print(s)
# Fix https://github.com/ytdl-org/youtube-dl/issues/4223
# See http://bugs.python.org/issue9161 for what is broken
def workaround_optparse_bug9161():
op = optparse.OptionParser()
og = optparse.OptionGroup(op, 'foo')
try:
og.add_option('-t')
except TypeError:
real_add_option = optparse.OptionGroup.add_option
def _compat_add_option(self, *args, **kwargs):
enc = lambda v: (
v.encode('ascii', 'replace') if isinstance(v, compat_str)
else v)
bargs = [enc(a) for a in args]
bkwargs = dict(
(k, enc(v)) for k, v in kwargs.items())
return real_add_option(self, *bargs, **bkwargs)
optparse.OptionGroup.add_option = _compat_add_option
try:
compat_Pattern = re.Pattern
except AttributeError:
compat_Pattern = type(re.compile(''))
try:
compat_Match = re.Match
except AttributeError:
compat_Match = type(re.compile('').match(''))
try:
compat_asyncio_run = asyncio.run # >= 3.7
except AttributeError:
def compat_asyncio_run(coro):
try:
loop = asyncio.get_event_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(coro)
asyncio.run = compat_asyncio_run
try: # >= 3.7
asyncio.tasks.all_tasks
except AttributeError:
asyncio.tasks.all_tasks = asyncio.tasks.Task.all_tasks
try:
import websockets as compat_websockets
except ImportError:
compat_websockets = None
# Python 3.8+ does not honor %HOME% on windows, but this breaks compatibility with youtube-dl
# See https://github.com/yt-dlp/yt-dlp/issues/792
# https://docs.python.org/3/library/os.path.html#os.path.expanduser
if compat_os_name in ('nt', 'ce') and 'HOME' in os.environ:
_userhome = os.environ['HOME']
def compat_expanduser(path):
if not path.startswith('~'):
return path
i = path.replace('\\', '/', 1).find('/') # ~user
if i < 0:
i = len(path)
userhome = os.path.join(os.path.dirname(_userhome), path[1:i]) if i > 1 else _userhome
return userhome + path[i:]
else:
compat_expanduser = os.path.expanduser
try:
from Cryptodome.Cipher import AES as compat_pycrypto_AES
except ImportError:
try:
from Crypto.Cipher import AES as compat_pycrypto_AES
except ImportError:
compat_pycrypto_AES = None
try:
import brotlicffi as compat_brotli
except ImportError:
try:
import brotli as compat_brotli
except ImportError:
compat_brotli = None
WINDOWS_VT_MODE = False if compat_os_name == 'nt' else None
def windows_enable_vt_mode(): # TODO: Do this the proper way https://bugs.python.org/issue30075
if compat_os_name != 'nt':
return
global WINDOWS_VT_MODE
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
try:
subprocess.Popen('', shell=True, startupinfo=startupinfo)
WINDOWS_VT_MODE = True
except Exception:
pass
# Deprecated
compat_basestring = str
compat_chr = chr
compat_filter = filter
compat_input = input
compat_integer_types = (int, )
compat_kwargs = lambda kwargs: kwargs
compat_map = map
compat_numeric_types = (int, float, complex)
compat_str = str
compat_xpath = lambda xpath: xpath
compat_zip = zip
compat_collections_abc = collections.abc
compat_HTMLParser = html.parser.HTMLParser
compat_HTTPError = urllib.error.HTTPError
compat_Struct = struct.Struct
compat_b64decode = base64.b64decode
compat_cookiejar = http.cookiejar
compat_cookiejar_Cookie = compat_cookiejar.Cookie
compat_cookies = http.cookies
compat_cookies_SimpleCookie = compat_cookies.SimpleCookie
compat_etree_Element = etree.Element
compat_etree_register_namespace = etree.register_namespace
compat_get_terminal_size = shutil.get_terminal_size
compat_getenv = os.getenv
compat_getpass = getpass.getpass
compat_html_entities = html.entities
compat_html_entities_html5 = compat_html_entities.html5
compat_http_client = http.client
compat_http_server = http.server
compat_itertools_count = itertools.count
compat_parse_qs = urllib.parse.parse_qs
compat_shlex_split = shlex.split
compat_socket_create_connection = socket.create_connection
compat_struct_pack = struct.pack
compat_struct_unpack = struct.unpack
compat_subprocess_get_DEVNULL = lambda: DEVNULL
compat_tokenize_tokenize = tokenize.tokenize
compat_urllib_error = urllib.error
compat_urllib_parse = urllib.parse
compat_urllib_parse_quote = urllib.parse.quote
compat_urllib_parse_quote_plus = urllib.parse.quote_plus
compat_urllib_parse_unquote = urllib.parse.unquote
compat_urllib_parse_unquote_plus = urllib.parse.unquote_plus
compat_urllib_parse_unquote_to_bytes = urllib.parse.unquote_to_bytes
compat_urllib_parse_urlencode = urllib.parse.urlencode
compat_urllib_parse_urlparse = urllib.parse.urlparse
compat_urllib_parse_urlunparse = urllib.parse.urlunparse
compat_urllib_request = urllib.request
compat_urllib_request_DataHandler = urllib.request.DataHandler
compat_urllib_response = urllib.response
compat_urlparse = urllib.parse
compat_urlretrieve = urllib.request.urlretrieve
compat_xml_parse_error = etree.ParseError
# Set public objects
__all__ = [
'WINDOWS_VT_MODE',
'compat_HTMLParseError',
'compat_HTMLParser',
'compat_HTTPError',
'compat_Match',
'compat_Pattern',
'compat_Struct',
'compat_asyncio_run',
'compat_b64decode',
'compat_basestring',
'compat_brotli',
'compat_chr',
'compat_collections_abc',
'compat_cookiejar',
'compat_cookiejar_Cookie',
'compat_cookies',
'compat_cookies_SimpleCookie',
'compat_ctypes_WINFUNCTYPE',
'compat_etree_Element',
'compat_etree_fromstring',
'compat_etree_register_namespace',
'compat_expanduser',
'compat_filter',
'compat_get_terminal_size',
'compat_getenv',
'compat_getpass',
'compat_html_entities',
'compat_html_entities_html5',
'compat_http_client',
'compat_http_server',
'compat_input',
'compat_integer_types',
'compat_itertools_count',
'compat_kwargs',
'compat_map',
'compat_numeric_types',
'compat_ord',
'compat_os_name',
'compat_parse_qs',
'compat_print',
'compat_pycrypto_AES',
'compat_realpath',
'compat_setenv',
'compat_shlex_quote',
'compat_shlex_split',
'compat_socket_create_connection',
'compat_str',
'compat_struct_pack',
'compat_struct_unpack',
'compat_subprocess_get_DEVNULL',
'compat_tokenize_tokenize',
'compat_urllib_error',
'compat_urllib_parse',
'compat_urllib_parse_quote',
'compat_urllib_parse_quote_plus',
'compat_urllib_parse_unquote',
'compat_urllib_parse_unquote_plus',
'compat_urllib_parse_unquote_to_bytes',
'compat_urllib_parse_urlencode',
'compat_urllib_parse_urlparse',
'compat_urllib_parse_urlunparse',
'compat_urllib_request',
'compat_urllib_request_DataHandler',
'compat_urllib_response',
'compat_urlparse',
'compat_urlretrieve',
'compat_websockets',
'compat_xml_parse_error',
'compat_xpath',
'compat_zip',
'windows_enable_vt_mode',
'workaround_optparse_bug9161',
]
```
#### File: yt_dlp/extractor/adn.py
```python
from __future__ import unicode_literals
import base64
import binascii
import json
import os
import random
from .common import InfoExtractor
from ..aes import aes_cbc_decrypt_bytes, unpad_pkcs7
from ..compat import (
compat_HTTPError,
compat_b64decode,
)
from ..utils import (
ass_subtitles_timecode,
bytes_to_intlist,
bytes_to_long,
ExtractorError,
float_or_none,
int_or_none,
intlist_to_bytes,
long_to_bytes,
pkcs1pad,
strip_or_none,
try_get,
unified_strdate,
urlencode_postdata,
)
class ADNIE(InfoExtractor):
IE_DESC = 'Anime Digital Network'
_VALID_URL = r'https?://(?:www\.)?animedigitalnetwork\.fr/video/[^/]+/(?P<id>\d+)'
_TEST = {
'url': 'http://animedigitalnetwork.fr/video/blue-exorcist-kyoto-saga/7778-episode-1-debut-des-hostilites',
'md5': '0319c99885ff5547565cacb4f3f9348d',
'info_dict': {
'id': '7778',
'ext': 'mp4',
'title': 'Blue Exorcist - Kyôto Saga - Episode 1',
'description': 'md5:2f7b5aa76edbc1a7a92cedcda8a528d5',
'series': 'Blue Exorcist - Kyôto Saga',
'duration': 1467,
'release_date': '20170106',
'comment_count': int,
'average_rating': float,
'season_number': 2,
'episode': 'Début des hostilités',
'episode_number': 1,
}
}
_NETRC_MACHINE = 'animedigitalnetwork'
_BASE_URL = 'http://animedigitalnetwork.fr'
_API_BASE_URL = 'https://gw.api.animedigitalnetwork.fr/'
_PLAYER_BASE_URL = _API_BASE_URL + 'player/'
_HEADERS = {}
_LOGIN_ERR_MESSAGE = 'Unable to log in'
_RSA_KEY = (0x9B42B08905199A5CCE2026274399CA560ECB209EE9878A708B1C0812E1BB8CB5D1FB7441861147C1A1F2F3A0476DD63A9CAC20D3E983613346850AA6CB38F16DC7D720FD7D86FC6E5B3D5BBC72E14CD0BF9E869F2CEA2CCAD648F1DCE38F1FF916CEFB2D339B64AA0264372344BC775E265E8A852F88144AB0BD9AA06C1A4ABB, 65537)
_POS_ALIGN_MAP = {
'start': 1,
'end': 3,
}
_LINE_ALIGN_MAP = {
'middle': 8,
'end': 4,
}
def _get_subtitles(self, sub_url, video_id):
if not sub_url:
return None
enc_subtitles = self._download_webpage(
sub_url, video_id, 'Downloading subtitles location', fatal=False) or '{}'
subtitle_location = (self._parse_json(enc_subtitles, video_id, fatal=False) or {}).get('location')
if subtitle_location:
enc_subtitles = self._download_webpage(
subtitle_location, video_id, 'Downloading subtitles data',
fatal=False, headers={'Origin': 'https://animedigitalnetwork.fr'})
if not enc_subtitles:
return None
# http://animedigitalnetwork.fr/components/com_vodvideo/videojs/adn-vjs.min.js
dec_subtitles = unpad_pkcs7(aes_cbc_decrypt_bytes(
compat_b64decode(enc_subtitles[24:]),
binascii.unhexlify(self._K + 'ab9f52f5baae7c72'),
compat_b64decode(enc_subtitles[:24])))
subtitles_json = self._parse_json(dec_subtitles.decode(), None, fatal=False)
if not subtitles_json:
return None
subtitles = {}
for sub_lang, sub in subtitles_json.items():
ssa = '''[Script Info]
ScriptType:V4.00
[V4 Styles]
Format: Name,Fontname,Fontsize,PrimaryColour,SecondaryColour,TertiaryColour,BackColour,Bold,Italic,BorderStyle,Outline,Shadow,Alignment,MarginL,MarginR,MarginV,AlphaLevel,Encoding
Style: Default,Arial,18,16777215,16777215,16777215,0,-1,0,1,1,0,2,20,20,20,0,0
[Events]
Format: Marked,Start,End,Style,Name,MarginL,MarginR,MarginV,Effect,Text'''
for current in sub:
start, end, text, line_align, position_align = (
float_or_none(current.get('startTime')),
float_or_none(current.get('endTime')),
current.get('text'), current.get('lineAlign'),
current.get('positionAlign'))
if start is None or end is None or text is None:
continue
alignment = self._POS_ALIGN_MAP.get(position_align, 2) + self._LINE_ALIGN_MAP.get(line_align, 0)
ssa += os.linesep + 'Dialogue: Marked=0,%s,%s,Default,,0,0,0,,%s%s' % (
ass_subtitles_timecode(start),
ass_subtitles_timecode(end),
'{\\a%d}' % alignment if alignment != 2 else '',
text.replace('\n', '\\N').replace('<i>', '{\\i1}').replace('</i>', '{\\i0}'))
if sub_lang == 'vostf':
sub_lang = 'fr'
subtitles.setdefault(sub_lang, []).extend([{
'ext': 'json',
'data': json.dumps(sub),
}, {
'ext': 'ssa',
'data': ssa,
}])
return subtitles
def _perform_login(self, username, password):
try:
access_token = (self._download_json(
self._API_BASE_URL + 'authentication/login', None,
'Logging in', self._LOGIN_ERR_MESSAGE, fatal=False,
data=urlencode_postdata({
'password': password,
'rememberMe': False,
'source': 'Web',
'username': username,
})) or {}).get('accessToken')
if access_token:
self._HEADERS = {'authorization': 'Bearer ' + access_token}
except ExtractorError as e:
message = None
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
resp = self._parse_json(
e.cause.read().decode(), None, fatal=False) or {}
message = resp.get('message') or resp.get('code')
self.report_warning(message or self._LOGIN_ERR_MESSAGE)
def _real_extract(self, url):
video_id = self._match_id(url)
video_base_url = self._PLAYER_BASE_URL + 'video/%s/' % video_id
player = self._download_json(
video_base_url + 'configuration', video_id,
'Downloading player config JSON metadata',
headers=self._HEADERS)['player']
options = player['options']
user = options['user']
if not user.get('hasAccess'):
self.raise_login_required()
token = self._download_json(
user.get('refreshTokenUrl') or (self._PLAYER_BASE_URL + 'refresh/token'),
video_id, 'Downloading access token', headers={
'x-player-refresh-token': user['refreshToken']
}, data=b'')['token']
links_url = try_get(options, lambda x: x['video']['url']) or (video_base_url + 'link')
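# Random 16-hex-digit key sent to the server; it is later reused (with a fixed
# suffix) as the AES key when decrypting the subtitles in _get_subtitles()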
self._K = ''.join([random.choice('0123456789abcdef') for _ in range(16)])
message = bytes_to_intlist(json.dumps({
'k': self._K,
't': token,
}))
# Sometimes authentication fails for no good reason, retry with
# a different random padding
links_data = None
for _ in range(3):
padded_message = intlist_to_bytes(pkcs1pad(message, 128))
n, e = self._RSA_KEY
encrypted_message = long_to_bytes(pow(bytes_to_long(padded_message), e, n))
authorization = base64.b64encode(encrypted_message).decode()
try:
links_data = self._download_json(
links_url, video_id, 'Downloading links JSON metadata', headers={
'X-Player-Token': authorization
}, query={
'freeWithAds': 'true',
'adaptive': 'false',
'withMetadata': 'true',
'source': 'Web'
})
break
except ExtractorError as e:
if not isinstance(e.cause, compat_HTTPError):
raise e
if e.cause.code == 401:
# This usually goes away with a different random pkcs1pad, so retry
continue
error = self._parse_json(e.cause.read(), video_id)
message = error.get('message')
if e.cause.code == 403 and error.get('code') == 'player-bad-geolocation-country':
self.raise_geo_restricted(msg=message)
raise ExtractorError(message)
else:
raise ExtractorError('Giving up retrying')
links = links_data.get('links') or {}
metas = links_data.get('metadata') or {}
sub_url = (links.get('subtitles') or {}).get('all')
video_info = links_data.get('video') or {}
title = metas['title']
formats = []
for format_id, qualities in (links.get('streaming') or {}).items():
if not isinstance(qualities, dict):
continue
for quality, load_balancer_url in qualities.items():
load_balancer_data = self._download_json(
load_balancer_url, video_id,
'Downloading %s %s JSON metadata' % (format_id, quality),
fatal=False) or {}
m3u8_url = load_balancer_data.get('location')
if not m3u8_url:
continue
m3u8_formats = self._extract_m3u8_formats(
m3u8_url, video_id, 'mp4', 'm3u8_native',
m3u8_id=format_id, fatal=False)
if format_id == 'vf':
for f in m3u8_formats:
f['language'] = 'fr'
formats.extend(m3u8_formats)
self._sort_formats(formats)
video = (self._download_json(
self._API_BASE_URL + 'video/%s' % video_id, video_id,
'Downloading additional video metadata', fatal=False) or {}).get('video') or {}
show = video.get('show') or {}
return {
'id': video_id,
'title': title,
'description': strip_or_none(metas.get('summary') or video.get('summary')),
'thumbnail': video_info.get('image') or player.get('image'),
'formats': formats,
'subtitles': self.extract_subtitles(sub_url, video_id),
'episode': metas.get('subtitle') or video.get('name'),
'episode_number': int_or_none(video.get('shortNumber')),
'series': show.get('title'),
'season_number': int_or_none(video.get('season')),
'duration': int_or_none(video_info.get('duration') or video.get('duration')),
'release_date': unified_strdate(video.get('releaseDate')),
'average_rating': float_or_none(video.get('rating') or metas.get('rating')),
'comment_count': int_or_none(video.get('commentsCount')),
}
```
#### File: yt_dlp/extractor/cctv.py
```python
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
float_or_none,
try_get,
unified_timestamp,
)
class CCTVIE(InfoExtractor):
IE_DESC = '央视网'
_VALID_URL = r'https?://(?:(?:[^/]+)\.(?:cntv|cctv)\.(?:com|cn)|(?:www\.)?ncpa-classic\.com)/(?:[^/]+/)*?(?P<id>[^/?#&]+?)(?:/index)?(?:\.s?html|[?#&]|$)'
_TESTS = [{
# fo.addVariable("videoCenterId","id")
'url': 'http://sports.cntv.cn/2016/02/12/ARTIaBRxv4rTT1yWf1frW2wi160212.shtml',
'md5': 'd61ec00a493e09da810bf406a078f691',
'info_dict': {
'id': '5ecdbeab623f4973b40ff25f18b174e8',
'ext': 'mp4',
'title': '[NBA]二少联手砍下46分 雷霆主场击败鹈鹕(快讯)',
'description': 'md5:7e14a5328dc5eb3d1cd6afbbe0574e95',
'duration': 98,
'uploader': 'songjunjie',
'timestamp': 1455279956,
'upload_date': '20160212',
},
}, {
# var guid = "id"
'url': 'http://tv.cctv.com/2016/02/05/VIDEUS7apq3lKrHG9Dncm03B160205.shtml',
'info_dict': {
'id': 'efc5d49e5b3b4ab2b34f3a502b73d3ae',
'ext': 'mp4',
'title': '[赛车]“车王”舒马赫恢复情况成谜(快讯)',
'description': '2月4日,蒙特泽莫罗透露了关于“车王”舒马赫恢复情况,但情况是否属实遭到了质疑。',
'duration': 37,
'uploader': 'shujun',
'timestamp': 1454677291,
'upload_date': '20160205',
},
'params': {
'skip_download': True,
},
}, {
# changePlayer('id')
'url': 'http://english.cntv.cn/special/four_comprehensives/index.shtml',
'info_dict': {
'id': '4bb9bb4db7a6471ba85fdeda5af0381e',
'ext': 'mp4',
'title': 'NHnews008 ANNUAL POLITICAL SEASON',
'description': 'Four Comprehensives',
'duration': 60,
'uploader': 'zhangyunlei',
'timestamp': 1425385521,
'upload_date': '20150303',
},
'params': {
'skip_download': True,
},
}, {
# loadvideo('id')
'url': 'http://cctv.cntv.cn/lm/tvseries_russian/yilugesanghua/index.shtml',
'info_dict': {
'id': 'b15f009ff45c43968b9af583fc2e04b2',
'ext': 'mp4',
'title': 'Путь,усыпанный космеями Серия 1',
'description': 'Путь, усыпанный космеями',
'duration': 2645,
'uploader': 'renxue',
'timestamp': 1477479241,
'upload_date': '20161026',
},
'params': {
'skip_download': True,
},
}, {
# var initMyAray = 'id'
'url': 'http://www.ncpa-classic.com/2013/05/22/VIDE1369219508996867.shtml',
'info_dict': {
'id': 'a194cfa7f18c426b823d876668325946',
'ext': 'mp4',
'title': '小泽征尔音乐塾 音乐梦想无国界',
'duration': 2173,
'timestamp': 1369248264,
'upload_date': '20130522',
},
'params': {
'skip_download': True,
},
}, {
# var ids = ["id"]
'url': 'http://www.ncpa-classic.com/clt/more/416/index.shtml',
'info_dict': {
'id': 'a8606119a4884588a79d81c02abecc16',
'ext': 'mp3',
'title': '来自维也纳的新年贺礼',
'description': 'md5:f13764ae8dd484e84dd4b39d5bcba2a7',
'duration': 1578,
'uploader': 'djy',
'timestamp': 1482942419,
'upload_date': '20161228',
},
'params': {
'skip_download': True,
},
'expected_warnings': ['Failed to download m3u8 information'],
}, {
'url': 'http://ent.cntv.cn/2016/01/18/ARTIjprSSJH8DryTVr5Bx8Wb160118.shtml',
'only_matching': True,
}, {
'url': 'http://tv.cntv.cn/video/C39296/e0210d949f113ddfb38d31f00a4e5c44',
'only_matching': True,
}, {
'url': 'http://english.cntv.cn/2016/09/03/VIDEhnkB5y9AgHyIEVphCEz1160903.shtml',
'only_matching': True,
}, {
'url': 'http://tv.cctv.com/2016/09/07/VIDE5C1FnlX5bUywlrjhxXOV160907.shtml',
'only_matching': True,
}, {
'url': 'http://tv.cntv.cn/video/C39296/95cfac44cabd3ddc4a9438780a4e5c44',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_id = self._search_regex(
[r'var\s+guid\s*=\s*["\']([\da-fA-F]+)',
r'videoCenterId["\']\s*,\s*["\']([\da-fA-F]+)',
r'changePlayer\s*\(\s*["\']([\da-fA-F]+)',
r'load[Vv]ideo\s*\(\s*["\']([\da-fA-F]+)',
r'var\s+initMyAray\s*=\s*["\']([\da-fA-F]+)',
r'var\s+ids\s*=\s*\[["\']([\da-fA-F]+)'],
webpage, 'video id')
data = self._download_json(
'http://vdn.apps.cntv.cn/api/getHttpVideoInfo.do', video_id,
query={
'pid': video_id,
'url': url,
'idl': 32,
'idlr': 32,
'modifyed': 'false',
})
title = data['title']
formats = []
video = data.get('video')
if isinstance(video, dict):
for quality, chapters_key in enumerate(('lowChapters', 'chapters')):
video_url = try_get(
video, lambda x: x[chapters_key][0]['url'], compat_str)
if video_url:
formats.append({
'url': video_url,
'format_id': 'http',
'quality': quality,
# Sample clip
'preference': -10
})
hls_url = try_get(data, lambda x: x['hls_url'], compat_str)
if hls_url:
hls_url = re.sub(r'maxbr=\d+&?', '', hls_url)
formats.extend(self._extract_m3u8_formats(
hls_url, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id='hls', fatal=False))
self._sort_formats(formats)
uploader = data.get('editer_name')
description = self._html_search_meta(
'description', webpage, default=None)
timestamp = unified_timestamp(data.get('f_pgmtime'))
duration = float_or_none(try_get(video, lambda x: x['totalLength']))
return {
'id': video_id,
'title': title,
'description': description,
'uploader': uploader,
'timestamp': timestamp,
'duration': duration,
'formats': formats,
}
```
#### File: yt_dlp/extractor/digitalconcerthall.py
```python
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
ExtractorError,
parse_resolution,
traverse_obj,
try_get,
urlencode_postdata,
)
class DigitalConcertHallIE(InfoExtractor):
IE_DESC = 'DigitalConcertHall extractor'
_VALID_URL = r'https?://(?:www\.)?digitalconcerthall\.com/(?P<language>[a-z]+)/concert/(?P<id>[0-9]+)'
_OAUTH_URL = 'https://api.digitalconcerthall.com/v2/oauth2/token'
_ACCESS_TOKEN = None
_NETRC_MACHINE = 'digitalconcerthall'
_TESTS = [{
'note': 'Playlist with only one video',
'url': 'https://www.digitalconcerthall.com/en/concert/53201',
'info_dict': {
'id': '53201-1',
'ext': 'mp4',
'composer': '<NAME>',
'title': '[Magic Night]',
'thumbnail': r're:^https?://images.digitalconcerthall.com/cms/thumbnails.*\.jpg$',
'upload_date': '20210624',
'timestamp': 1624548600,
'duration': 2798,
'album_artist': 'Members of the Berliner Philharmoniker / <NAME>',
},
'params': {'skip_download': 'm3u8'},
}, {
'note': 'Concert with several works and an interview',
'url': 'https://www.digitalconcerthall.com/en/concert/53785',
'info_dict': {
'id': '53785',
'album_artist': 'Berliner Philharmoniker / <NAME>',
'title': 'Kir<NAME>renko conducts Mendelssohn and Shostakovich',
},
'params': {'skip_download': 'm3u8'},
'playlist_count': 3,
}]
def _perform_login(self, username, password):
token_response = self._download_json(
self._OAUTH_URL,
None, 'Obtaining token', errnote='Unable to obtain token', data=urlencode_postdata({
'affiliate': 'none',
'grant_type': 'device',
'device_vendor': 'unknown',
'app_id': 'dch.webapp',
'app_version': '1.0.0',
'client_secret': '<KEY>',
}), headers={
'Content-Type': 'application/x-www-form-urlencoded',
})
self._ACCESS_TOKEN = token_response['access_token']
try:
self._download_json(
self._OAUTH_URL,
None, note='Logging in', errnote='Unable to login', data=urlencode_postdata({
'grant_type': 'password',
'username': username,
'password': password,
}), headers={
'Content-Type': 'application/x-www-form-urlencoded',
'Referer': 'https://www.digitalconcerthall.com',
'Authorization': f'Bearer {self._ACCESS_TOKEN}'
})
except ExtractorError:
self.raise_login_required(msg='Login info incorrect')
def _real_initialize(self):
if not self._ACCESS_TOKEN:
self.raise_login_required(method='password')
def _entries(self, items, language, **kwargs):
for item in items:
video_id = item['id']
stream_info = self._download_json(
self._proto_relative_url(item['_links']['streams']['href']), video_id, headers={
'Accept': 'application/json',
'Authorization': f'Bearer {self._ACCESS_TOKEN}',
'Accept-Language': language
})
m3u8_url = traverse_obj(
stream_info, ('channel', lambda x: x.startswith('vod_mixed'), 'stream', 0, 'url'), get_all=False)
formats = self._extract_m3u8_formats(m3u8_url, video_id, 'mp4', 'm3u8_native', fatal=False)
self._sort_formats(formats)
yield {
'id': video_id,
'title': item.get('title'),
'composer': item.get('name_composer'),
'url': m3u8_url,
'formats': formats,
'duration': item.get('duration_total'),
'timestamp': traverse_obj(item, ('date', 'published')),
'description': item.get('short_description') or stream_info.get('short_description'),
**kwargs,
'chapters': [{
'start_time': chapter.get('time'),
'end_time': try_get(chapter, lambda x: x['time'] + x['duration']),
'title': chapter.get('text'),
} for chapter in item['cuepoints']] if item.get('cuepoints') else None,
}
def _real_extract(self, url):
language, video_id = self._match_valid_url(url).group('language', 'id')
if not language:
language = 'en'
thumbnail_url = self._html_search_regex(
r'(https?://images\.digitalconcerthall\.com/cms/thumbnails/.*\.jpg)',
self._download_webpage(url, video_id), 'thumbnail')
thumbnails = [{
'url': thumbnail_url,
**parse_resolution(thumbnail_url)
}]
vid_info = self._download_json(
f'https://api.digitalconcerthall.com/v2/concert/{video_id}', video_id, headers={
'Accept': 'application/json',
'Accept-Language': language
})
album_artist = ' / '.join(traverse_obj(vid_info, ('_links', 'artist', ..., 'name')) or '')
return {
'_type': 'playlist',
'id': video_id,
'title': vid_info.get('title'),
'entries': self._entries(traverse_obj(vid_info, ('_embedded', ..., ...)), language,
thumbnails=thumbnails, album_artist=album_artist),
'thumbnails': thumbnails,
'album_artist': album_artist,
}
```
#### File: yt_dlp/extractor/doodstream.py
```python
from __future__ import unicode_literals
import string
import random
import time
from .common import InfoExtractor
class DoodStreamIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?dood\.(?:to|watch)/[ed]/(?P<id>[a-z0-9]+)'
_TESTS = [{
'url': 'http://dood.to/e/5s1wmbdacezb',
'md5': '4568b83b31e13242b3f1ff96c55f0595',
'info_dict': {
'id': '5s1wmbdacezb',
'ext': 'mp4',
'title': 'Kat Wonders - Monthly May 2020',
'description': 'Kat Wonders - Monthly May 2020 | DoodStream.com',
'thumbnail': 'https://img.doodcdn.com/snaps/flyus84qgl2fsk4g.jpg',
}
}, {
'url': 'http://dood.watch/d/5s1wmbdacezb',
'md5': '4568b83b31e13242b3f1ff96c55f0595',
'info_dict': {
'id': '5s1wmbdacezb',
'ext': 'mp4',
'title': 'Kat Wonders - Monthly May 2020',
'description': 'Kat Wonders - Monthly May 2020 | DoodStream.com',
'thumbnail': 'https://img.doodcdn.com/snaps/flyus84qgl2fsk4g.jpg',
}
}, {
'url': 'https://dood.to/d/jzrxn12t2s7n',
'md5': '3207e199426eca7c2aa23c2872e6728a',
'info_dict': {
'id': 'jzrxn12t2s7n',
'ext': 'mp4',
'title': '<NAME>ute ALLWAYSWELL',
'description': '<NAME> ALLWAYSWELL | DoodStream.com',
'thumbnail': 'https://img.doodcdn.com/snaps/8edqd5nppkac3x8u.jpg',
}
}]
def _real_extract(self, url):
video_id = self._match_id(url)
url = f'https://dood.to/e/{video_id}'
webpage = self._download_webpage(url, video_id)
title = self._html_search_meta(['og:title', 'twitter:title'], webpage, default=None)
thumb = self._html_search_meta(['og:image', 'twitter:image'], webpage, default=None)
token = self._html_search_regex(r'[?&]token=([a-z0-9]+)[&\']', webpage, 'token')
description = self._html_search_meta(
['og:description', 'description', 'twitter:description'], webpage, default=None)
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:53.0) Gecko/20100101 Firefox/66.0',
'referer': url
}
pass_md5 = self._html_search_regex(r'(/pass_md5.*?)\'', webpage, 'pass_md5')
final_url = ''.join((
self._download_webpage(f'https://dood.to{pass_md5}', video_id, headers=headers),
*(random.choice(string.ascii_letters + string.digits) for _ in range(10)),
f'?token={token}&expiry={int(time.time() * 1000)}',
))
return {
'id': video_id,
'title': title,
'url': final_url,
'http_headers': headers,
'ext': 'mp4',
'description': description,
'thumbnail': thumb,
}
```
#### File: yt_dlp/extractor/eroprofile.py
```python
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_urllib_parse_urlencode
from ..utils import (
ExtractorError,
merge_dicts,
)
class EroProfileIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?eroprofile\.com/m/videos/view/(?P<id>[^/]+)'
_LOGIN_URL = 'http://www.eroprofile.com/auth/auth.php?'
_NETRC_MACHINE = 'eroprofile'
_TESTS = [{
'url': 'http://www.eroprofile.com/m/videos/view/sexy-babe-softcore',
'md5': 'c26f351332edf23e1ea28ce9ec9de32f',
'info_dict': {
'id': '3733775',
'display_id': 'sexy-babe-softcore',
'ext': 'm4v',
'title': 'sexy babe softcore',
'thumbnail': r're:https?://.*\.jpg',
'age_limit': 18,
},
'skip': 'Video not found',
}, {
'url': 'http://www.eroprofile.com/m/videos/view/Try-It-On-Pee_cut_2-wmv-4shared-com-file-sharing-download-movie-file',
'md5': '1baa9602ede46ce904c431f5418d8916',
'info_dict': {
'id': '1133519',
'ext': 'm4v',
'title': 'Try It On Pee_cut_2.wmv - 4shared.com - file sharing - download movie file',
'thumbnail': r're:https?://.*\.jpg',
'age_limit': 18,
},
'skip': 'Requires login',
}]
def _perform_login(self, username, password):
query = compat_urllib_parse_urlencode({
'username': username,
'password': password,
'url': 'http://www.eroprofile.com/',
})
login_url = self._LOGIN_URL + query
login_page = self._download_webpage(login_url, None, False)
m = re.search(r'Your username or password was incorrect\.', login_page)
if m:
raise ExtractorError(
'Wrong username and/or password.', expected=True)
self.report_login()
redirect_url = self._search_regex(
r'<script[^>]+?src="([^"]+)"', login_page, 'login redirect url')
self._download_webpage(redirect_url, None, False)
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
m = re.search(r'You must be logged in to view this video\.', webpage)
if m:
self.raise_login_required('This video requires login')
video_id = self._search_regex(
[r"glbUpdViews\s*\('\d*','(\d+)'", r'p/report/video/(\d+)'],
webpage, 'video id', default=None)
title = self._html_search_regex(
(r'Title:</th><td>([^<]+)</td>', r'<h1[^>]*>(.+?)</h1>'),
webpage, 'title')
info = self._parse_html5_media_entries(url, webpage, video_id)[0]
return merge_dicts(info, {
'id': video_id,
'display_id': display_id,
'title': title,
'age_limit': 18,
})
class EroProfileAlbumIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?eroprofile\.com/m/videos/album/(?P<id>[^/]+)'
IE_NAME = 'EroProfile:album'
_TESTS = [{
'url': 'https://www.eroprofile.com/m/videos/album/BBW-2-893',
'info_dict': {
'id': 'BBW-2-893',
'title': 'BBW 2'
},
'playlist_mincount': 486,
},
]
def _extract_from_page(self, page):
for url in re.findall(r'href=".*?(/m/videos/view/[^"]+)"', page):
yield self.url_result(f'https://www.eroprofile.com{url}', EroProfileIE.ie_key())
def _entries(self, playlist_id, first_page):
yield from self._extract_from_page(first_page)
page_urls = re.findall(rf'href=".*?(/m/videos/album/{playlist_id}\?pnum=(\d+))"', first_page)
max_page = max(int(n) for _, n in page_urls)
for n in range(2, max_page + 1):
url = f'https://www.eroprofile.com/m/videos/album/{playlist_id}?pnum={n}'
yield from self._extract_from_page(
self._download_webpage(url, playlist_id,
note=f'Downloading playlist page {int(n) - 1}'))
def _real_extract(self, url):
playlist_id = self._match_id(url)
first_page = self._download_webpage(url, playlist_id, note='Downloading playlist')
playlist_title = self._search_regex(
r'<title>Album: (.*) - EroProfile</title>', first_page, 'playlist_title')
return self.playlist_result(self._entries(playlist_id, first_page), playlist_id, playlist_title)
```
#### File: yt_dlp/extractor/iprima.py
```python
from __future__ import unicode_literals
import re
import time
from .common import InfoExtractor
from ..utils import (
determine_ext,
js_to_json,
urlencode_postdata,
ExtractorError,
parse_qs
)
class IPrimaIE(InfoExtractor):
_VALID_URL = r'https?://(?!cnn)(?:[^/]+)\.iprima\.cz/(?:[^/]+/)*(?P<id>[^/?#&]+)'
_GEO_BYPASS = False
_NETRC_MACHINE = 'iprima'
_LOGIN_URL = 'https://auth.iprima.cz/oauth2/login'
_TOKEN_URL = 'https://auth.iprima.cz/oauth2/token'
access_token = None
_TESTS = [{
'url': 'https://prima.iprima.cz/particka/92-epizoda',
'info_dict': {
'id': 'p51388',
'ext': 'mp4',
'title': 'Partička (92)',
'description': 'md5:859d53beae4609e6dd7796413f1b6cac',
'upload_date': '20201103',
'timestamp': 1604437480,
},
'params': {
'skip_download': True, # m3u8 download
},
}, {
'url': 'http://play.iprima.cz/particka/particka-92',
'only_matching': True,
}, {
# geo restricted
'url': 'http://play.iprima.cz/closer-nove-pripady/closer-nove-pripady-iv-1',
'only_matching': True,
}, {
'url': 'https://prima.iprima.cz/my-little-pony/mapa-znameni-2-2',
'only_matching': True,
}, {
'url': 'https://prima.iprima.cz/porady/jak-se-stavi-sen/rodina-rathousova-praha',
'only_matching': True,
}, {
'url': 'http://www.iprima.cz/filmy/desne-rande',
'only_matching': True,
}, {
'url': 'https://zoom.iprima.cz/10-nejvetsich-tajemstvi-zahad/posvatna-mista-a-stavby',
'only_matching': True,
}, {
'url': 'https://krimi.iprima.cz/mraz-0/sebevrazdy',
'only_matching': True,
}, {
'url': 'https://cool.iprima.cz/derava-silnice-nevadi',
'only_matching': True,
}, {
'url': 'https://love.iprima.cz/laska-az-za-hrob/slib-dany-bratrovi',
'only_matching': True,
}]
def _perform_login(self, username, password):
if self.access_token:
return
login_page = self._download_webpage(
self._LOGIN_URL, None, note='Downloading login page',
errnote='Downloading login page failed')
login_form = self._hidden_inputs(login_page)
login_form.update({
'_email': username,
'_password': password})
_, login_handle = self._download_webpage_handle(
self._LOGIN_URL, None, data=urlencode_postdata(login_form),
note='Logging in')
        # guard against a missing ?code= parameter so the friendly error below is raised
        code = (parse_qs(login_handle.geturl()).get('code') or [None])[0]
if not code:
raise ExtractorError('Login failed', expected=True)
token_request_data = {
'scope': 'openid+email+profile+phone+address+offline_access',
'client_id': 'prima_sso',
'grant_type': 'authorization_code',
'code': code,
'redirect_uri': 'https://auth.iprima.cz/sso/auth-check'}
token_data = self._download_json(
self._TOKEN_URL, None,
note='Downloading token', errnote='Downloading token failed',
data=urlencode_postdata(token_request_data))
self.access_token = token_data.get('access_token')
if self.access_token is None:
raise ExtractorError('Getting token failed', expected=True)
def _real_initialize(self):
if not self.access_token:
self.raise_login_required('Login is required to access any iPrima content', method='password')
def _raise_access_error(self, error_code):
if error_code == 'PLAY_GEOIP_DENIED':
self.raise_geo_restricted(countries=['CZ'], metadata_available=True)
elif error_code is not None:
self.raise_no_formats('Access to stream infos forbidden', expected=True)
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_search_meta(
['og:title', 'twitter:title'],
webpage, 'title', default=None)
video_id = self._search_regex((
r'productId\s*=\s*([\'"])(?P<id>p\d+)\1',
r'pproduct_id\s*=\s*([\'"])(?P<id>p\d+)\1'),
webpage, 'real id', group='id')
metadata = self._download_json(
f'https://api.play-backend.iprima.cz/api/v1//products/id-{video_id}/play',
video_id, note='Getting manifest URLs', errnote='Failed to get manifest URLs',
headers={'X-OTT-Access-Token': self.access_token},
expected_status=403)
self._raise_access_error(metadata.get('errorCode'))
stream_infos = metadata.get('streamInfos')
formats = []
if stream_infos is None:
self.raise_no_formats('Reading stream infos failed', expected=True)
else:
for manifest in stream_infos:
manifest_type = manifest.get('type')
manifest_url = manifest.get('url')
ext = determine_ext(manifest_url)
if manifest_type == 'HLS' or ext == 'm3u8':
formats += self._extract_m3u8_formats(
manifest_url, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id='hls', fatal=False)
elif manifest_type == 'DASH' or ext == 'mpd':
formats += self._extract_mpd_formats(
manifest_url, video_id, mpd_id='dash', fatal=False)
self._sort_formats(formats)
final_result = self._search_json_ld(webpage, video_id) or {}
final_result.update({
'id': video_id,
'title': title,
'thumbnail': self._html_search_meta(
['thumbnail', 'og:image', 'twitter:image'],
webpage, 'thumbnail', default=None),
'formats': formats,
'description': self._html_search_meta(
['description', 'og:description', 'twitter:description'],
webpage, 'description', default=None)})
return final_result
class IPrimaCNNIE(InfoExtractor):
_VALID_URL = r'https?://cnn\.iprima\.cz/(?:[^/]+/)*(?P<id>[^/?#&]+)'
_GEO_BYPASS = False
_TESTS = [{
'url': 'https://cnn.iprima.cz/porady/strunc/24072020-koronaviru-mam-plne-zuby-strasit-druhou-vlnou-je-absurdni-rika-senatorka-dernerova',
'info_dict': {
'id': 'p716177',
'ext': 'mp4',
'title': 'md5:277c6b1ed0577e51b40ddd35602ff43e',
},
'params': {
'skip_download': 'm3u8'
}
}]
def _real_extract(self, url):
video_id = self._match_id(url)
self._set_cookie('play.iprima.cz', 'ott_adult_confirmed', '1')
webpage = self._download_webpage(url, video_id)
title = self._og_search_title(
webpage, default=None) or self._search_regex(
r'<h1>([^<]+)', webpage, 'title')
video_id = self._search_regex(
(r'<iframe[^>]+\bsrc=["\'](?:https?:)?//(?:api\.play-backend\.iprima\.cz/prehravac/embedded|prima\.iprima\.cz/[^/]+/[^/]+)\?.*?\bid=(p\d+)',
r'data-product="([^"]+)">',
r'id=["\']player-(p\d+)"',
r'playerId\s*:\s*["\']player-(p\d+)',
r'\bvideos\s*=\s*["\'](p\d+)'),
webpage, 'real id')
playerpage = self._download_webpage(
'http://play.iprima.cz/prehravac/init',
video_id, note='Downloading player', query={
'_infuse': 1,
'_ts': round(time.time()),
'productId': video_id,
}, headers={'Referer': url})
formats = []
def extract_formats(format_url, format_key=None, lang=None):
ext = determine_ext(format_url)
new_formats = []
if format_key == 'hls' or ext == 'm3u8':
new_formats = self._extract_m3u8_formats(
format_url, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id='hls', fatal=False)
elif format_key == 'dash' or ext == 'mpd':
return
new_formats = self._extract_mpd_formats(
format_url, video_id, mpd_id='dash', fatal=False)
if lang:
for f in new_formats:
if not f.get('language'):
f['language'] = lang
formats.extend(new_formats)
options = self._parse_json(
self._search_regex(
r'(?s)(?:TDIPlayerOptions|playerOptions)\s*=\s*({.+?});\s*\]\]',
playerpage, 'player options', default='{}'),
video_id, transform_source=js_to_json, fatal=False)
if options:
for key, tracks in options.get('tracks', {}).items():
if not isinstance(tracks, list):
continue
for track in tracks:
src = track.get('src')
if src:
extract_formats(src, key.lower(), track.get('lang'))
if not formats:
for _, src in re.findall(r'src["\']\s*:\s*(["\'])(.+?)\1', playerpage):
extract_formats(src)
if not formats and '>GEO_IP_NOT_ALLOWED<' in playerpage:
self.raise_geo_restricted(countries=['CZ'], metadata_available=True)
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'thumbnail': self._og_search_thumbnail(webpage, default=None),
'formats': formats,
'description': self._og_search_description(webpage, default=None),
}
```
#### File: yt_dlp/extractor/lecturio.py
```python
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
clean_html,
determine_ext,
ExtractorError,
float_or_none,
int_or_none,
str_or_none,
url_or_none,
urlencode_postdata,
urljoin,
)
class LecturioBaseIE(InfoExtractor):
_API_BASE_URL = 'https://app.lecturio.com/api/en/latest/html5/'
_LOGIN_URL = 'https://app.lecturio.com/en/login'
_NETRC_MACHINE = 'lecturio'
def _perform_login(self, username, password):
# Sets some cookies
_, urlh = self._download_webpage_handle(
self._LOGIN_URL, None, 'Downloading login popup')
def is_logged(url_handle):
return self._LOGIN_URL not in url_handle.geturl()
# Already logged in
if is_logged(urlh):
return
login_form = {
'signin[email]': username,
'signin[password]': password,
'signin[remember]': 'on',
}
response, urlh = self._download_webpage_handle(
self._LOGIN_URL, None, 'Logging in',
data=urlencode_postdata(login_form))
# Logged in successfully
if is_logged(urlh):
return
errors = self._html_search_regex(
r'(?s)<ul[^>]+class=["\']error_list[^>]+>(.+?)</ul>', response,
'errors', default=None)
if errors:
raise ExtractorError('Unable to login: %s' % errors, expected=True)
raise ExtractorError('Unable to log in')
class LecturioIE(LecturioBaseIE):
_VALID_URL = r'''(?x)
https://
(?:
app\.lecturio\.com/([^/]+/(?P<nt>[^/?#&]+)\.lecture|(?:\#/)?lecture/c/\d+/(?P<id>\d+))|
(?:www\.)?lecturio\.de/[^/]+/(?P<nt_de>[^/?#&]+)\.vortrag
)
'''
_TESTS = [{
'url': 'https://app.lecturio.com/medical-courses/important-concepts-and-terms-introduction-to-microbiology.lecture#tab/videos',
'md5': '9a42cf1d8282a6311bf7211bbde26fde',
'info_dict': {
'id': '39634',
'ext': 'mp4',
'title': 'Important Concepts and Terms — Introduction to Microbiology',
},
'skip': 'Requires lecturio account credentials',
}, {
'url': 'https://www.lecturio.de/jura/oeffentliches-recht-staatsexamen.vortrag',
'only_matching': True,
}, {
'url': 'https://app.lecturio.com/#/lecture/c/6434/39634',
'only_matching': True,
}]
_CC_LANGS = {
'Arabic': 'ar',
'Bulgarian': 'bg',
'German': 'de',
'English': 'en',
'Spanish': 'es',
'Persian': 'fa',
'French': 'fr',
'Japanese': 'ja',
'Polish': 'pl',
'Pashto': 'ps',
'Russian': 'ru',
}
def _real_extract(self, url):
mobj = self._match_valid_url(url)
nt = mobj.group('nt') or mobj.group('nt_de')
lecture_id = mobj.group('id')
display_id = nt or lecture_id
api_path = 'lectures/' + lecture_id if lecture_id else 'lecture/' + nt + '.json'
video = self._download_json(
self._API_BASE_URL + api_path, display_id)
title = video['title'].strip()
if not lecture_id:
pid = video.get('productId') or video.get('uid')
if pid:
spid = pid.split('_')
if spid and len(spid) == 2:
lecture_id = spid[1]
formats = []
for format_ in video['content']['media']:
if not isinstance(format_, dict):
continue
file_ = format_.get('file')
if not file_:
continue
ext = determine_ext(file_)
if ext == 'smil':
# smil contains only broken RTMP formats anyway
continue
file_url = url_or_none(file_)
if not file_url:
continue
label = str_or_none(format_.get('label'))
filesize = int_or_none(format_.get('fileSize'))
f = {
'url': file_url,
'format_id': label,
'filesize': float_or_none(filesize, invscale=1000)
}
if label:
mobj = re.match(r'(\d+)p\s*\(([^)]+)\)', label)
if mobj:
f.update({
'format_id': mobj.group(2),
'height': int(mobj.group(1)),
})
formats.append(f)
self._sort_formats(formats)
subtitles = {}
automatic_captions = {}
captions = video.get('captions') or []
for cc in captions:
cc_url = cc.get('url')
if not cc_url:
continue
cc_label = cc.get('translatedCode')
lang = cc.get('languageCode') or self._search_regex(
r'/([a-z]{2})_', cc_url, 'lang',
default=cc_label.split()[0] if cc_label else 'en')
original_lang = self._search_regex(
r'/[a-z]{2}_([a-z]{2})_', cc_url, 'original lang',
default=None)
sub_dict = (automatic_captions
if 'auto-translated' in cc_label or original_lang
else subtitles)
sub_dict.setdefault(self._CC_LANGS.get(lang, lang), []).append({
'url': cc_url,
})
return {
'id': lecture_id or nt,
'title': title,
'formats': formats,
'subtitles': subtitles,
'automatic_captions': automatic_captions,
}
class LecturioCourseIE(LecturioBaseIE):
_VALID_URL = r'https://app\.lecturio\.com/(?:[^/]+/(?P<nt>[^/?#&]+)\.course|(?:#/)?course/c/(?P<id>\d+))'
_TESTS = [{
'url': 'https://app.lecturio.com/medical-courses/microbiology-introduction.course#/',
'info_dict': {
'id': 'microbiology-introduction',
'title': 'Microbiology: Introduction',
'description': 'md5:13da8500c25880c6016ae1e6d78c386a',
},
'playlist_count': 45,
'skip': 'Requires lecturio account credentials',
}, {
'url': 'https://app.lecturio.com/#/course/c/6434',
'only_matching': True,
}]
def _real_extract(self, url):
nt, course_id = self._match_valid_url(url).groups()
display_id = nt or course_id
api_path = 'courses/' + course_id if course_id else 'course/content/' + nt + '.json'
course = self._download_json(
self._API_BASE_URL + api_path, display_id)
entries = []
for lecture in course.get('lectures', []):
lecture_id = str_or_none(lecture.get('id'))
lecture_url = lecture.get('url')
if lecture_url:
lecture_url = urljoin(url, lecture_url)
else:
lecture_url = 'https://app.lecturio.com/#/lecture/c/%s/%s' % (course_id, lecture_id)
entries.append(self.url_result(
lecture_url, ie=LecturioIE.ie_key(), video_id=lecture_id))
return self.playlist_result(
entries, display_id, course.get('title'),
clean_html(course.get('description')))
class LecturioDeCourseIE(LecturioBaseIE):
_VALID_URL = r'https://(?:www\.)?lecturio\.de/[^/]+/(?P<id>[^/?#&]+)\.kurs'
_TEST = {
'url': 'https://www.lecturio.de/jura/grundrechte.kurs',
'only_matching': True,
}
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
entries = []
for mobj in re.finditer(
r'(?s)<td[^>]+\bdata-lecture-id=["\'](?P<id>\d+).+?\bhref=(["\'])(?P<url>(?:(?!\2).)+\.vortrag)\b[^>]+>',
webpage):
lecture_url = urljoin(url, mobj.group('url'))
lecture_id = mobj.group('id')
entries.append(self.url_result(
lecture_url, ie=LecturioIE.ie_key(), video_id=lecture_id))
title = self._search_regex(
r'<h1[^>]*>([^<]+)', webpage, 'title', default=None)
return self.playlist_result(entries, display_id, title)
```
#### File: yt_dlp/extractor/myspass.py
```python
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
int_or_none,
parse_duration,
xpath_text,
)
class MySpassIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?myspass\.de/(?:[^/]+/)*(?P<id>\d+)/?[^/]*$'
_TESTS = [{
'url': 'http://www.myspass.de/myspass/shows/tvshows/absolute-mehrheit/Absolute-Mehrheit-vom-17022013-Die-Highlights-Teil-2--/11741/',
'md5': '0b49f4844a068f8b33f4b7c88405862b',
'info_dict': {
'id': '11741',
'ext': 'mp4',
'description': 'md5:9f0db5044c8fe73f528a390498f7ce9b',
'title': '17.02.2013 - Die Highlights, Teil 2',
'thumbnail': r're:.*\.jpg',
'duration': 323.0,
'episode': '17.02.2013 - Die Highlights, Teil 2',
'season_id': '544',
'episode_number': 1,
'series': 'Absolute Mehrheit',
'season_number': 2,
'season': 'Season 2',
},
},
{
'url': 'https://www.myspass.de/shows/tvshows/tv-total/Novak-Puffovic-bei-bester-Laune--/44996/',
'md5': 'eb28b7c5e254192046e86ebaf7deac8f',
'info_dict': {
'id': '44996',
'ext': 'mp4',
'description': 'md5:74c7f886e00834417f1e427ab0da6121',
'title': 'Novak Puffovic bei bester Laune',
'thumbnail': r're:.*\.jpg',
'episode_number': 8,
'episode': 'Novak Puffovic bei bester Laune',
'series': 'TV total',
'season': 'Season 19',
'season_id': '987',
'duration': 2941.0,
'season_number': 19,
},
},
{
'url': 'https://www.myspass.de/channels/tv-total-raabigramm/17033/20831/',
'md5': '7b293a6b9f3a7acdd29304c8d0dbb7cc',
'info_dict': {
'id': '20831',
'ext': 'mp4',
'description': 'Gefühle pur: Schaut euch die ungeschnittene Version von <NAME> an die Moderationsgrazie von Welt, Verona Feldbusch, an.',
'title': 'Raabigramm Verona Feldbusch',
'thumbnail': r're:.*\.jpg',
'episode_number': 6,
'episode': 'Raabigramm Verona Feldbusch',
'series': 'TV total',
'season': 'Season 1',
'season_id': '34',
'duration': 105.0,
'season_number': 1,
},
}]
def _real_extract(self, url):
video_id = self._match_id(url)
metadata = self._download_xml('http://www.myspass.de/myspass/includes/apps/video/getvideometadataxml.php?id=' + video_id, video_id)
title = xpath_text(metadata, 'title', fatal=True)
video_url = xpath_text(metadata, 'url_flv', 'download url', True)
video_id_int = int(video_id)
for group in self._search_regex(r'/myspass2009/\d+/(\d+)/(\d+)/(\d+)/', video_url, 'myspass', group=(1, 2, 3), default=[]):
group_int = int(group)
if group_int > video_id_int:
video_url = video_url.replace(group, compat_str(group_int // video_id_int))
return {
'id': video_id,
'url': video_url,
'title': title,
'thumbnail': xpath_text(metadata, 'imagePreview'),
'description': xpath_text(metadata, 'description'),
'duration': parse_duration(xpath_text(metadata, 'duration')),
'series': xpath_text(metadata, 'format'),
'season_number': int_or_none(xpath_text(metadata, 'season')),
'season_id': xpath_text(metadata, 'season_id'),
'episode': title,
'episode_number': int_or_none(xpath_text(metadata, 'episode')),
}
```
#### File: yt_dlp/extractor/nfb.py
```python
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import int_or_none
class NFBIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?nfb\.ca/film/(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'https://www.nfb.ca/film/trafficopter/',
'info_dict': {
'id': 'trafficopter',
'ext': 'mp4',
'title': 'Trafficopter',
'description': 'md5:060228455eb85cf88785c41656776bc0',
'thumbnail': r're:^https?://.*\.jpg$',
'uploader': '<NAME>',
'release_year': 1972,
},
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage('https://www.nfb.ca/film/%s/' % video_id, video_id)
iframe = self._html_search_regex(
r'<[^>]+\bid=["\']player-iframe["\'][^>]*src=["\']([^"\']+)',
webpage, 'iframe', default=None, fatal=True)
if iframe.startswith('/'):
iframe = f'https://www.nfb.ca{iframe}'
player = self._download_webpage(iframe, video_id)
source = self._html_search_regex(
r'source:\s*\'([^\']+)',
player, 'source', default=None, fatal=True)
formats, subtitles = self._extract_m3u8_formats_and_subtitles(source, video_id, ext='mp4')
self._sort_formats(formats)
return {
'id': video_id,
'title': self._html_search_regex(
r'<[^>]+\bid=["\']titleHeader["\'][^>]*>\s*<h1[^>]*>\s*([^<]+?)\s*</h1>',
webpage, 'title', default=None),
'description': self._html_search_regex(
r'<[^>]+\bid=["\']tabSynopsis["\'][^>]*>\s*<p[^>]*>\s*([^<]+)',
webpage, 'description', default=None),
'thumbnail': self._html_search_regex(
r'poster:\s*\'([^\']+)',
player, 'thumbnail', default=None),
'uploader': self._html_search_regex(
r'<[^>]+\bitemprop=["\']name["\'][^>]*>([^<]+)',
webpage, 'uploader', default=None),
'release_year': int_or_none(self._html_search_regex(
r'<[^>]+\bitemprop=["\']datePublished["\'][^>]*>([^<]+)',
webpage, 'release_year', default=None)),
'formats': formats,
'subtitles': subtitles,
}
```
#### File: yt_dlp/extractor/openload.py
```python
from __future__ import unicode_literals
import json
import os
import subprocess
import tempfile
from ..compat import (
compat_urlparse,
compat_kwargs,
)
from ..utils import (
check_executable,
encodeArgument,
ExtractorError,
get_exe_version,
is_outdated_version,
Popen,
)
def cookie_to_dict(cookie):
cookie_dict = {
'name': cookie.name,
'value': cookie.value,
}
if cookie.port_specified:
cookie_dict['port'] = cookie.port
if cookie.domain_specified:
cookie_dict['domain'] = cookie.domain
if cookie.path_specified:
cookie_dict['path'] = cookie.path
if cookie.expires is not None:
cookie_dict['expires'] = cookie.expires
if cookie.secure is not None:
cookie_dict['secure'] = cookie.secure
if cookie.discard is not None:
cookie_dict['discard'] = cookie.discard
try:
if (cookie.has_nonstandard_attr('httpOnly')
or cookie.has_nonstandard_attr('httponly')
or cookie.has_nonstandard_attr('HttpOnly')):
cookie_dict['httponly'] = True
except TypeError:
pass
return cookie_dict
def cookie_jar_to_list(cookie_jar):
return [cookie_to_dict(cookie) for cookie in cookie_jar]
class PhantomJSwrapper(object):
"""PhantomJS wrapper class
This class is experimental.
"""
_TEMPLATE = r'''
phantom.onError = function(msg, trace) {{
var msgStack = ['PHANTOM ERROR: ' + msg];
if(trace && trace.length) {{
msgStack.push('TRACE:');
trace.forEach(function(t) {{
msgStack.push(' -> ' + (t.file || t.sourceURL) + ': ' + t.line
+ (t.function ? ' (in function ' + t.function +')' : ''));
}});
}}
console.error(msgStack.join('\n'));
phantom.exit(1);
}};
var page = require('webpage').create();
var fs = require('fs');
var read = {{ mode: 'r', charset: 'utf-8' }};
var write = {{ mode: 'w', charset: 'utf-8' }};
JSON.parse(fs.read("{cookies}", read)).forEach(function(x) {{
phantom.addCookie(x);
}});
page.settings.resourceTimeout = {timeout};
page.settings.userAgent = "{ua}";
page.onLoadStarted = function() {{
page.evaluate(function() {{
delete window._phantom;
delete window.callPhantom;
}});
}};
var saveAndExit = function() {{
fs.write("{html}", page.content, write);
fs.write("{cookies}", JSON.stringify(phantom.cookies), write);
phantom.exit();
}};
page.onLoadFinished = function(status) {{
if(page.url === "") {{
page.setContent(fs.read("{html}", read), "{url}");
}}
else {{
{jscode}
}}
}};
page.open("");
'''
_TMP_FILE_NAMES = ['script', 'html', 'cookies']
@staticmethod
def _version():
return get_exe_version('phantomjs', version_re=r'([0-9.]+)')
def __init__(self, extractor, required_version=None, timeout=10000):
self._TMP_FILES = {}
self.exe = check_executable('phantomjs', ['-v'])
if not self.exe:
raise ExtractorError('PhantomJS executable not found in PATH, '
'download it from http://phantomjs.org',
expected=True)
self.extractor = extractor
if required_version:
version = self._version()
if is_outdated_version(version, required_version):
self.extractor._downloader.report_warning(
'Your copy of PhantomJS is outdated, update it to version '
'%s or newer if you encounter any errors.' % required_version)
self.options = {
'timeout': timeout,
}
for name in self._TMP_FILE_NAMES:
tmp = tempfile.NamedTemporaryFile(delete=False)
tmp.close()
self._TMP_FILES[name] = tmp
def __del__(self):
for name in self._TMP_FILE_NAMES:
try:
os.remove(self._TMP_FILES[name].name)
except (IOError, OSError, KeyError):
pass
def _save_cookies(self, url):
cookies = cookie_jar_to_list(self.extractor._downloader.cookiejar)
for cookie in cookies:
if 'path' not in cookie:
cookie['path'] = '/'
if 'domain' not in cookie:
cookie['domain'] = compat_urlparse.urlparse(url).netloc
with open(self._TMP_FILES['cookies'].name, 'wb') as f:
f.write(json.dumps(cookies).encode('utf-8'))
def _load_cookies(self):
with open(self._TMP_FILES['cookies'].name, 'rb') as f:
cookies = json.loads(f.read().decode('utf-8'))
for cookie in cookies:
if cookie['httponly'] is True:
cookie['rest'] = {'httpOnly': None}
if 'expiry' in cookie:
cookie['expire_time'] = cookie['expiry']
self.extractor._set_cookie(**compat_kwargs(cookie))
def get(self, url, html=None, video_id=None, note=None, note2='Executing JS on webpage', headers={}, jscode='saveAndExit();'):
"""
Downloads webpage (if needed) and executes JS
Params:
url: website url
html: optional, html code of website
video_id: video id
note: optional, displayed when downloading webpage
note2: optional, displayed when executing JS
headers: custom http headers
jscode: code to be executed when page is loaded
Returns tuple with:
* downloaded website (after JS execution)
* anything you print with `console.log` (but not inside `page.execute`!)
In most cases you don't need to add any `jscode`.
It is executed in `page.onLoadFinished`.
`saveAndExit();` is mandatory, use it instead of `phantom.exit()`
It is possible to wait for some element on the webpage, for example:
var check = function() {
var elementFound = page.evaluate(function() {
return document.querySelector('#b.done') !== null;
});
if(elementFound)
saveAndExit();
else
window.setTimeout(check, 500);
}
page.evaluate(function(){
document.querySelector('#a').click();
});
check();
"""
if 'saveAndExit();' not in jscode:
raise ExtractorError('`saveAndExit();` not found in `jscode`')
if not html:
html = self.extractor._download_webpage(url, video_id, note=note, headers=headers)
with open(self._TMP_FILES['html'].name, 'wb') as f:
f.write(html.encode('utf-8'))
self._save_cookies(url)
replaces = self.options
replaces['url'] = url
user_agent = headers.get('User-Agent') or self.extractor.get_param('http_headers')['User-Agent']
replaces['ua'] = user_agent.replace('"', '\\"')
replaces['jscode'] = jscode
for x in self._TMP_FILE_NAMES:
replaces[x] = self._TMP_FILES[x].name.replace('\\', '\\\\').replace('"', '\\"')
with open(self._TMP_FILES['script'].name, 'wb') as f:
f.write(self._TEMPLATE.format(**replaces).encode('utf-8'))
if video_id is None:
self.extractor.to_screen('%s' % (note2,))
else:
self.extractor.to_screen('%s: %s' % (video_id, note2))
p = Popen(
[self.exe, '--ssl-protocol=any', self._TMP_FILES['script'].name],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate_or_kill()
if p.returncode != 0:
raise ExtractorError(
'Executing JS failed\n:' + encodeArgument(err))
with open(self._TMP_FILES['html'].name, 'rb') as f:
html = f.read().decode('utf-8')
self._load_cookies()
return (html, encodeArgument(out))
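# Typical use from inside an extractor (a sketch; `self` is an InfoExtractor
# instance and the URL/ID are placeholders):
#   phantom = PhantomJSwrapper(self, required_version='2.0')
#   webpage, logs = phantom.get(url, video_id=video_id, note='Downloading webpage')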
```
|
{
"source": "JeroenKnoops/functiondefextractor",
"score": 2
}
|
#### File: JeroenKnoops/functiondefextractor/setup.py
```python
import setuptools
def get_license():
""" replace the license content while creating the package"""
with open("LICENSE.md", "r", encoding="utf8") as fh:
license_description = fh.read()
return license_description
def get_install():
""" replace the install content while creating the package"""
with open("INSTALL.md", "r", encoding="utf8") as fh:
install_description = fh.read()
return install_description
def get_maintainers():
""" replace the maintainers content while creating the package"""
with open("MAINTAINERS.md", "r", encoding="utf8") as fh:
maintainers_description = fh.read()
return maintainers_description
with open("README.md", "r", encoding="utf8") as fh:
long_description = fh.read()
if "[INSTALL.md](INSTALL.md)" in long_description:
long_description = long_description.replace("[INSTALL.md](INSTALL.md)", str(get_install()))
if "[MAINTAINERS.md](MAINTAINERS.md)" in long_description:
long_description = long_description.replace("[MAINTAINERS.md](MAINTAINERS.md)", str(get_maintainers()))
if "[License.md](License.md)" in long_description:
long_description = long_description.replace("[License.md](License.md)", str(get_license()))
with open('requirements.txt') as f:
required = f.read().splitlines()
setuptools.setup(
name="functiondefextractor",
version="0.0.2",
author="Brijesh",
author_email="<EMAIL>",
description="Function Definition Extractor",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/bkk003/FunctionDefExtractor",
packages=setuptools.find_packages(include=['functiondefextractor'], exclude=['test', '*.test', '*.test.*']),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=required,
python_requires='>=3.6',
)
```
|
{
"source": "JeroenKnoops/TextSimilarityProcessor",
"score": 4
}
|
#### File: TextSimilarityProcessor/test/test_unit.py
```python
import unittest
from collections import Counter
import similarity_processor.similarity_core as cc
class MyUnitTestCase(unittest.TestCase):
"""This class verifies the individual functionality of the units:
get_cosine()
text_to_vector()
    check_tolerance()
    methods with valid and invalid inputs to verify each function's behaviour"""
def test_get_positive_cosine(self):
"""This method checks the value returned by the cosine_core.get_cosine()
for vec1, vec2: Input vector from the texts to be compared - positive cosine """
positive_cosine = cc.get_cosine(Counter({"hello": 16, "Language": 30, "python": 66}),
Counter({"Mac": 9, "MANGO": 27, "python": 88, "hello": 5}))
self.assertEqual(0.8562387195638202, positive_cosine, "Value should not be equal to 0")
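    # The expected value above follows from the cosine formula: the shared words
    # are "hello" and "python", so the dot product is 16*5 + 66*88 = 5888, the
    # norms are sqrt(5512) and sqrt(8579), and 5888 / (sqrt(5512)*sqrt(8579)) ≈ 0.8562.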
def test_get_negative_cosine(self):
"""This method checks the value returned by the cosine_core.get_cosine()
for vec1, vec2: Input vector from the texts to be compared - negative cosine value"""
negative_cosine = cc.get_cosine(Counter({"hello_World": 99}),
Counter({"TEST": 888}))
self.assertEqual(0.0, negative_cosine, "Value should be 0.0")
def test_get_cosine_same(self):
"""This method checks the value returned by the cosine_core.get_cosine()
for vec1, vec2: Input vector from the texts to be compared"""
positive_cosine = cc.get_cosine(Counter({"hello": 16, "Language": 30, "python": 66}),
Counter({"hello": 16, "Language": 30, "python": 66}))
self.assertEqual(1.0, positive_cosine, "Value should not be equal to 0")
def test_get_cosine_none(self):
"""This method checks the value returned by the cosine_core.get_cosine()
for vec1, vec2: Input vector from the texts to be compared"""
_cosine = cc.get_cosine(Counter({"": 0}),
Counter({"": 0}))
self.assertEqual(0.0, _cosine, "Value should be equal to 0")
def test_text_to_invalid_vector(self):
"""This method checks the value returned by the cosine_core.text_to_vector()
for converting text to vector for invalid input """
negative_text = None
try:
negative_text = cc.text_to_vector(1234567.988766)
except TypeError as err:
print('Error: ', str(err))
self.assertIsNone(negative_text, "Vector should not be generated")
def test_text_to_valid_vector(self):
"""This method checks the value returned by the cosine_core.text_to_vector()
for converting text to vector for valid input """
text = "What is generator in Python with example Python generators \
are a simple, A Counter is a container that"
positive_text = cc.text_to_vector(text)
self.assertEqual(Counter, type(positive_text),
"Counter vector should be generated from text")
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "JeroenKools/covid19",
"score": 3
}
|
#### File: JeroenKools/covid19/interactive_plot.py
```python
from covid19_processing import *
import ipywidgets as widgets
def run():
data = Covid19Processing(False)
data.process(rows=0, debug=False)
data.set_default_countries([
"World", "California, US", "Mongolia", "United States",
"India", "Netherlands"])
widgets.interact(data.plot_interactive,
x_metric=["calendar_date", "day_number"],
y_metric=["confirmed", "deaths", "active",
"new confirmed", "new deaths",
"confirmed/population", "active/population", "deaths/population",
"new confirmed/population", "new deaths/population",
"recent confirmed", "recent deaths",
"recent confirmed/population", "recent deaths/population",
"deaths/confirmed", "recent deaths/recent confirmed"],
smoothing_days=widgets.IntSlider(min=0, max=31, step=1, value=7), use_log_scale=True)
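# Intended to be run from a Jupyter notebook cell, e.g. (a sketch):
#   from interactive_plot import run
#   run()  # renders metric dropdowns, the smoothing slider and the log-scale toggle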
```
|
{
"source": "jeroenmanders/aws-python",
"score": 2
}
|
#### File: aws/route53/client.py
```python
import boto3
class Client(object):
_instance = None
@staticmethod
def get_instance():
if Client._instance == None:
Client._instance = Client()
return Client._instance
def __init__(self):
self._client = boto3.client('route53', region_name="us-east-1")
def get_client(self):
return self._client
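# Example use of the singleton accessor (a sketch):
#   route53 = Client.get_instance().get_client()
#   zones = route53.list_hosted_zones()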
```
#### File: aws/ses/ses_mailer.py
```python
import boto3
from botocore.exceptions import ClientError
class SesMailer(object):
def __init__(self):
pass
def mail(self, sender, recipient, subject, body_text, body_html):
CONFIGURATION_SET = "ConfigSet"
AWS_REGION = "us-east-1"
CHARSET = "UTF-8"
client = boto3.client('ses', region_name=AWS_REGION)
try:
response = client.send_email(
Destination={
'ToAddresses': [
recipient,
],
},
Message={
'Body': {
'Html': {
'Charset': CHARSET,
'Data': body_html,
},
'Text': {
'Charset': CHARSET,
'Data': body_text,
},
},
'Subject': {
'Charset': CHARSET,
'Data': subject,
},
},
Source=sender,
#ConfigurationSetName=CONFIGURATION_SET,
)
# Display an error if something goes wrong.
except ClientError as e:
print(e.response['Error']['Message'])
else:
print("Email sent! Message ID:"),
print(response['MessageId'])
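# Example use (a sketch; the sender must be a verified SES identity in the account):
#   SesMailer().mail('sender@example.com', 'recipient@example.com',
#                    'Test subject', 'Plain-text body', '<h1>HTML body</h1>')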
```
|
{
"source": "Jeroen-Matthijssens/jsx-lexer",
"score": 3
}
|
#### File: jsx-lexer/tests/test_lexer.py
```python
import os
from unittest import TestCase
from pygments import lexers
from jsx import lexer as lexer_mod
from jsx.lexer import JsxLexer
from .tokens import TOKENS as expected_tokens
CURRENT_DIR = os.path.abspath(os.path.dirname(__file__))
lexer = lexers.load_lexer_from_file(lexer_mod.__file__, "JsxLexer")
with open(os.path.join(CURRENT_DIR, 'example.jsx'), 'r') as fh:
text = fh.read()
class JsxLexerTestCase(TestCase):
def test_guess_lexer_for_filename(self):
guessed_lexer = lexers.guess_lexer_for_filename('test.jsx', text)
self.assertEqual(guessed_lexer.name, JsxLexer.name)
def test_get_lexer_by_name(self):
lexer = lexers.get_lexer_by_name('jsx')
self.assertEqual(lexer.name, JsxLexer.name)
def test_get_tokens(self):
lexer = lexers.get_lexer_by_name('jsx')
tokens = lexer.get_tokens(text)
self.assertEqual([i for i in tokens], expected_tokens)
```
|
{
"source": "jeroen-meijer/lightly",
"score": 4
}
|
#### File: local_server/led_controller/led_controller.py
```python
from typing import Protocol, Callable
class LedController(Protocol):
"""
A protocol for controlling the LEDs.
Fields:
- pin: int, the GPIO pin the LEDs are connected to.
- count: int, the number of LEDs.
- order: str, the order of the LEDs.
Methods:
    - setPixels(pixels: dict[int, tuple[int, int, int]]) -> None, sets the LEDs at the given indices to the given colors.
"""
# Define the three fields above
pin: int
count: int
order: str
def setPixels(self, pixels: dict[int, tuple[int, int, int]]):
"""Sets the LEDs at the given indices to the given colors."""
print("Unimplemented method setPixels() called with", pixels)
def buildPixels(
self, buildColor: Callable[[int], tuple[int, int, int]]
) -> dict[int, tuple[int, int, int]]:
"""Runs the given pixel builder callback for each pixel in the chain
and returns the resulting dictionary of pixels.
        The callback takes a single argument, the current pixel index, and may
        return None to leave that pixel unset.
        """
pixels: dict[int, tuple[int, int, int]] = {}
for i in range(self.count):
pixel = buildColor(i)
if pixel is not None:
pixels[i] = pixel
return pixels
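# A minimal concrete implementation of the protocol above might look like this
# hypothetical sketch (the real NeoPixel wiring code is kept commented out below):
#   class NeoPixelController(LedController):
#       def __init__(self, pin: int, count: int, order: str):
#           self.pin, self.count, self.order = pin, count, order
#       def setPixels(self, pixels: dict[int, tuple[int, int, int]]):
#           for index, color in pixels.items():
#               print(f"pixel {index} -> {color}")  # replace with actual LED writes
#   controller = NeoPixelController(18, 200, "GRB")
#   controller.setPixels(controller.buildPixels(lambda i: (255, 0, 0)))  # all red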
# Neopixel
# pixels = neopixel.NeoPixel(
# PIXEL_PIN,
# NUM_PIXELS,
# pixel_order=ORDER,
# auto_write=False,
# brightness=0.2,
# )
# def wheel(pos):
# # Input a value 0 to 255 to get a color value.
# # The colours are a transition r - g - b - back to r.
# if pos < 0 or pos > 255:
# r = g = b = 0
# elif pos < 85:
# r = int(pos * 3)
# g = int(255 - pos * 3)
# b = 0
# elif pos < 170:
# pos -= 85
# r = int(255 - pos * 3)
# g = 0
# b = int(pos * 3)
# else:
# pos -= 170
# r = 0
# g = int(pos * 3)
# b = int(255 - pos * 3)
# return (r, g, b) if ORDER in (neopixel.RGB, neopixel.GRB) else (r, g, b, 0)
# def rainbow_cycle(wait):
# for j in range(255):
# for i in range(NUM_PIXELS):
# pixel_index = (i * 256 // NUM_PIXELS) + j
# pixels[i] = wheel(pixel_index & 255)
# pixels.show()
# time.sleep(wait)
# while True:
# # Comment this line out if you have RGBW/GRBW NeoPixels
# pixels.fill((255, 0, 0))
# # Uncomment this line if you have RGBW/GRBW NeoPixels
# # pixels.fill((255, 0, 0, 0))
# pixels.show()
# time.sleep(1)
# # Comment this line out if you have RGBW/GRBW NeoPixels
# pixels.fill((0, 255, 0))
# # Uncomment this line if you have RGBW/GRBW NeoPixels
# # pixels.fill((0, 255, 0, 0))
# pixels.show()
# time.sleep(1)
# # Comment this line out if you have RGBW/GRBW NeoPixels
# pixels.fill((0, 0, 255))
# # Uncomment this line if you have RGBW/GRBW NeoPixels
# # pixels.fill((0, 0, 255, 0))
# pixels.show()
# time.sleep(1)
# rainbow_cycle(0.001) # rainbow cycle with 1ms delay per step
# firstArg = sys.argv[1]
# print("Arg: ", firstArg)
# jsonPayload = json.loads(firstArg)
# print("Json payload: ", jsonPayload)
# rgb = jsonPayload
# pixels = neopixel.NeoPixel(board.D18, 200, brightness=1)
# for _i in range(5):
# print("%s Blackout" % datetime.now())
# pixels.fill((0, 0, 0))
# time.sleep(0.5)
# print("%s Show LED" % datetime.now())
# # G, R, B
# pixels.fill((rgb[1], rgb[0], rgb[2]))
# pixels.show()
# time.sleep(0.5)
# print("--------------")
```
|
{
"source": "jeroenmeulendijks/gdax-tradebot",
"score": 4
}
|
#### File: gdax-tradebot/model/Indicators.py
```python
import pandas as pd
import matplotlib.pyplot as plt
from config import *
from stockstats import StockDataFrame
from abc import ABC, abstractmethod
class Signal(object):
BUY = "BUY"
SELL = "SELL"
value = None
class Indicator(ABC):
def __init__(self):
super().__init__()
def plotWithPrice(self):
        # By default an indicator is not plotted together with the prices.
        # Override this method and return True when the indicator should be
        # drawn on the price chart instead of in its own subplot.
return False
@abstractmethod
def plot(self, subplot, stock):
pass
def signal(self, dataframes):
return Signal()
@classmethod
def isEnabled(cls):
return (cls.__str__(cls) in INDICATORS)
@abstractmethod
def __str__(self):
pass
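# A new indicator plugs in by subclassing Indicator: implement plot() and __str__(),
# and it is only active when its name is listed in INDICATORS in config.
# A hypothetical minimal example (not part of this bot):
#   class SMA(Indicator):
#       def plotWithPrice(self):
#           return True
#       def plot(self, subplot, stock):
#           plt.plot(stock['datetime'], stock['close_10_sma'])
#       def __str__(self):
#           return "SMA"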
class EMA(Indicator):
def __init__(self):
super().__init__()
def plotWithPrice(self):
return True
def plot(self, subplot, stock):
plt.plot(stock['datetime'], stock['close_5_ema'])
plt.plot(stock['datetime'], stock['close_20_ema'])
def signal(self, stock):
s = Signal()
if stock.shape[0] > 5:
EMA5 = stock['close_5_ema'].tail(2).reset_index(drop=True)
EMA20 = stock['close_20_ema'].tail(2).reset_index(drop=True)
            # Death cross: the short EMA drops below the long EMA -> SELL
            if (EMA5[1] <= EMA20[1]) & (EMA5[0] >= EMA20[0]):
                s.value = Signal.SELL
            # Golden cross: the short EMA rises above the long EMA -> BUY
            elif (EMA5[1] >= EMA20[1]) & (EMA5[0] <= EMA20[0]):
                s.value = Signal.BUY
return s
def __str__(self):
return "EMA"
class RSI(Indicator):
def __init__(self):
super().__init__()
def plot(self, subplot, stock):
subplot.cla()
plt.plot(stock['datetime'], stock['rsi_14'])
def __str__(self):
return "RSI"
def calculateRSI(self, dataframe, period):
# Calculate RSI and add to dataframe
length = dataframe.shape[0]
if (length >= period):
delta = dataframe['close'].dropna().apply(float).diff()
dUp, dDown = delta.copy(), delta.copy()
dUp[dUp < 0] = 0
dDown[dDown > 0] = 0
RollUp = dUp.rolling(window=period).mean()
RollDown = dDown.rolling(window=period).mean().abs()
RS = RollUp / RollDown
RSI = 100.0 - (100.0 / (1.0 + RS))
dataframe['RSI'] = RSI
class MACD(Indicator):
def __init__(self):
super().__init__()
def plot(self, subplot, stock):
subplot.cla()
plt.plot(stock['datetime'], stock['macd'])
plt.plot(stock['datetime'], stock['macds'])
def signal(self, stock):
s = Signal()
if stock.shape[0] > 2:
signal = stock['macds'].tail(2).reset_index(drop=True)
macd = stock['macd'].tail(2).reset_index(drop=True)
# If the MACD crosses the signal line upward BUY!
if macd[1] > signal[1] and macd[0] <= signal[0]:
s.value = Signal.BUY
# The other way around. SELL
elif macd[1] < signal[1] and macd[0] >= signal[0]:
s.value = Signal.SELL
# Do nothing if not crossed
else:
pass
return s
def __str__(self):
return "MACD"
class DMI(Indicator):
def __init__(self):
super().__init__()
def plot(self, subplot, stock):
subplot.cla()
# +DI, default to 14 days
plt.plot(stock['datetime'], stock['pdi'])
# -DI, default to 14 days
plt.plot(stock['datetime'], stock['mdi'])
plt.plot(stock['datetime'], stock['adx'])
plt.plot(stock['datetime'], stock['adxr'])
def __str__(self):
return "DMI"
```
|
{
"source": "JeroenMols/AdventOfCode2021",
"score": 3
}
|
#### File: AdventOfCode2021/day_10/day_10.py
```python
def load_input(file_name):
a_file = open(file_name, "r")
input = []
for line in a_file:
input.append(line.strip())
return input
def problem_a():
# lines = load_input("day_10_sample.txt")
lines = load_input("day_10.txt")
char_to_points = {')': 3, ']': 57, '}': 1197, '>': 25137}
score = 0
for line in lines:
replaced = replace_valid_chucks(line)
invalid_chars = get_invalid_chars(replaced)
if len(invalid_chars) != 0:
score += char_to_points.get(invalid_chars[0])
print('Result: ', score)
def problem_b():
# lines = load_input("day_10_sample.txt")
lines = load_input("day_10.txt")
char_to_points = {'(': 1, '[': 2, '{': 3, '<': 4}
scores = []
for line in lines:
replaced = replace_valid_chucks(line)
if len(get_invalid_chars(replaced)) == 0:
score = 0
for char in replaced[::-1]:
score = score * 5 + char_to_points[char]
scores.append(score)
scores.sort()
print('Result: ', scores[int(len(scores) / 2)])
def replace_valid_chucks(line):
before = line
while True:
after = before.replace('()', '').replace('[]', '').replace('{}', '').replace('<>', '')
if len(after) == len(before):
break
else:
before = after
return after
def get_invalid_chars(replaced):
return replaced.replace('(', '').replace('[', '').replace('{', '').replace('<', '')
if __name__ == '__main__':
problem_b()
```
#### File: AdventOfCode2021/day_11/day_11.py
```python
def load_input(file_name):
a_file = open(file_name, "r")
input = []
for line in a_file:
input.append([Octopus(element, True) for element in line.strip()])
return input
class Octopus:
value: int
flashed: bool
processed: bool = False
def __init__(self, value, flashed):
self.value = int(value)
self.flashed = flashed
def reset(self):
self.flashed = False
self.processed = False
def step(self):
if not self.flashed:
self.value += 1
if self.value > 9:
self.flashed = True
self.value = 0
def __str__(self):
return str(self.value)
def problem_a():
# octopi = load_input("day_11_sample.txt")
octopi = load_input("day_11.txt")
flashes = 0
for step in range(0, 100):
perform_step(octopi)
# Count flashes
flashes_in_step = count_flashes(octopi)
flashes += flashes_in_step
print(step + 1, " flashes: ", flashes_in_step)
print_matrix(octopi)
print('Result: ', flashes)
def problem_b():
# octopi = load_input("day_11_sample.txt")
octopi = load_input("day_11.txt")
step = 0
while True:
step += 1
perform_step(octopi)
if count_flashes(octopi) == 100:
break
print('Result: ', step)
def perform_step(octopi):
# Reset octopi
for x in range(0, len(octopi[0])):
for y in range(0, len(octopi)):
octopi[y][x].reset()
# First add
for x in range(0, len(octopi[0])):
for y in range(0, len(octopi)):
octopi[y][x].step()
# Snowball
has_more_flashes = True
while has_more_flashes:
has_more_flashes = False
# Process all flashed octopi that haven't been processed yet
for x in range(0, len(octopi[0])):
for y in range(0, len(octopi)):
if octopi[y][x].flashed and not octopi[y][x].processed:
# add to neighbours
has_more_flashes = True
octopi[y][x].processed = True
process_neighbours(octopi, x, y)
def count_flashes(octopi):
flashes_in_step = 0
for x in range(0, len(octopi[0])):
for y in range(0, len(octopi)):
if octopi[y][x].flashed:
flashes_in_step += 1
return flashes_in_step
def get_value_or_default(matrix, x, y):
if y < 0 or y >= len(matrix) or x < 0 or x >= len(matrix[0]):
return Octopus(0, False)
else:
return matrix[y][x]
def process_neighbours(matrix, x, y):
get_value_or_default(matrix, x - 1, y - 1).step()
get_value_or_default(matrix, x - 1, y).step()
get_value_or_default(matrix, x - 1, y + 1).step()
get_value_or_default(matrix, x, y - 1).step()
get_value_or_default(matrix, x, y + 1).step()
get_value_or_default(matrix, x + 1, y - 1).step()
get_value_or_default(matrix, x + 1, y).step()
get_value_or_default(matrix, x + 1, y + 1).step()
def print_matrix(matrix):
for y in range(0, len(matrix)):
line = ''
for x in range(0, len(matrix[0])):
line += str(matrix[y][x])
print(line)
if __name__ == '__main__':
problem_a()
```
#### File: AdventOfCode2021/day_12/day_12.py
```python
def load_input(file_name):
a_file = open(file_name, "r")
input = []
for line in a_file:
route = line.strip().split('-')
input.append(Path(route[0], route[1]))
return input
class Path:
start: str
end: str
def __init__(self, start, end):
self.start = start
self.end = end
class Route:
nodes: []
allow_one_double_visit: bool
def __init__(self, nodes, allow_one_double_visit=False):
self.nodes = nodes
self.allow_one_double_visit = allow_one_double_visit
def end(self):
return self.nodes[-1]
def can_pass_by(self, node):
if node == 'start' and 'start' in self.nodes:
return False
if node.islower() and node in self.nodes:
if not self.allow_one_double_visit:
return False
elif not self.has_double():
return True
else:
return False
else:
return True
def has_double(self):
lower_nodes = []
for node in self.nodes:
if not node.islower():
continue
elif node in lower_nodes:
return True
else:
lower_nodes.append(node)
return False
def finished(self):
if self.nodes[-1] == 'end':
return True
else:
return False
def __str__(self):
to_string = ''
for node in self.nodes:
to_string += node + ','
return to_string[:-1]
def problem_a():
# paths = load_input("day_12_sample.txt")
paths = load_input("day_12.txt")
new_routes = [Route(['start'])]
completed_routes = []
while len(new_routes) > 0:
new_routes, completed = routes_step(new_routes, paths, False)
completed_routes += completed
print_routes(completed_routes)
print("Result: ", len(completed_routes))
def problem_b():
# paths = load_input("day_12_sample.txt")
paths = load_input("day_12.txt")
new_routes = [Route(['start'])]
completed_routes = []
while len(new_routes) > 0:
new_routes, completed = routes_step(new_routes, paths, True)
completed_routes += completed
print_routes(completed_routes)
print("Result: ", len(completed_routes))
def routes_step(routes, paths, allow_double):
new_routes = []
completed_routes = []
for route in routes:
if route.finished():
completed_routes.append(route)
continue
for path in paths:
if route.end() == path.start:
if route.can_pass_by(path.end):
new_nodes = route.nodes.copy() + [path.end]
new_routes.append(Route(new_nodes, allow_double))
elif route.end() == path.end:
if route.can_pass_by(path.start):
new_nodes = route.nodes.copy() + [path.start]
new_routes.append(Route(new_nodes, allow_double))
return new_routes, completed_routes
def print_routes(routes):
for route in routes:
print(route)
if __name__ == '__main__':
problem_b()
```
#### File: AdventOfCode2021/day_14/day_14.py
```python
def load_input(file_name):
a_file = open(file_name, "r")
template = ''
insertions = {}
for line in a_file:
if '->' in line:
raw_insertions = line.strip().split(' -> ')
insertions[raw_insertions[0]] = raw_insertions[1]
elif line.strip() == '':
continue
else:
template = line.strip()
return template, insertions
def problem_a():
template, insertions = load_input("day_14_sample.txt")
# template, insertions = load_input("day_14.txt")
processed = perform_insertions(template, insertions, 10)
occurrences = get_occurrences(processed)
    print(occurrences)
print("Result: ", max(occurrences.values()) - min(occurrences.values()))
def problem_b():
# template, insertions = load_input("day_14_sample.txt")
template, insertions = load_input("day_14.txt")
times = 40
initial = 20
first_pass = perform_insertions(template, insertions, initial)
# Split in smaller shards
shards = []
for index in range(0, len(first_pass) - 1):
shards.append(first_pass[index] + first_pass[index + 1])
shard_occurrences = process_shards(initial, insertions, shards, times)
occurrences = merge_shard_occurrences(shard_occurrences)
print("Result: ", max(occurrences.values()) - min(occurrences.values()))
def process_shards(initial, insertions, shards, times):
cache = {}
shard_occurrences = []
for index, shard in enumerate(shards):
if index == len(shards) - 1:
processed = perform_insertions(shard, insertions, times - initial)
shard_occurrences.append(get_occurrences(processed))
else:
if shard not in cache.keys():
# Exclude last element as that's duplicate in the next shard.
processed = perform_insertions(shard, insertions, times - initial)
cache[shard] = get_occurrences(processed[0:len(processed) - 1])
shard_occurrences.append(cache[shard])
print(index,'/', len(shards))
return shard_occurrences
def perform_insertions(shard, insertions, times):
for step in range(0, times):
new_shard = [shard[0]]
for index in range(1, len(shard)):
new_shard.append(insertions[(shard[index - 1] + shard[index])])
new_shard.append(shard[index])
shard = new_shard.copy()
return shard
def get_occurrences(template):
occurrences = {}
for char in template:
if char in occurrences:
occurrences[char] = occurrences[char] + 1
else:
occurrences[char] = 1
return occurrences
def merge_shard_occurrences(shard_occurrences):
occurrences = {}
for shard_occurrences in shard_occurrences:
for char in shard_occurrences:
if char in occurrences:
occurrences[char] = occurrences[char] + shard_occurrences[char]
else:
occurrences[char] = shard_occurrences[char]
return occurrences
if __name__ == '__main__':
problem_b()
```
#### File: AdventOfCode2021/day_15/day_15.py
```python
from dataclasses import dataclass
def load_input(file_name):
a_file = open(file_name, "r")
cavern = []
for line in a_file:
cavern.append([int(element) for element in list(line.strip())])
return cavern
@dataclass(eq=True, frozen=True)
class Path:
points: []
cost: int
@dataclass(eq=True, frozen=True)
class Point:
x: int
y: int
def problem_a():
# cavern = load_input("day_15_sample.txt")
cavern = load_input("day_15.txt")
min_cost = brute_force_all_paths(cavern)
print("Result: ", min_cost)
# Doesn't work in practice: this needs a proper shortest-path algorithm such as
# Dijkstra or A* (see the sketch after this function).
# After 2 hours of running, it only made it to step 571.
def problem_b():
# cavern = load_input("day_15_sample.txt")
cavern = load_input("day_15.txt")
data = repeat_input(cavern, 5)
min_cost = brute_force_all_paths(data)
print('Result:', min_cost)
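# A workable alternative (a sketch, not part of the original solution): Dijkstra's
# algorithm with a binary heap solves the full repeated grid in well under a second
# and could replace brute_force_all_paths in problem_b.
def dijkstra_minimum(cavern):
    import heapq
    height, width = len(cavern), len(cavern[0])
    best = {(0, 0): 0}  # cheapest known risk to enter each point; the start cell is not counted
    queue = [(0, 0, 0)]  # (cost, x, y)
    while queue:
        cost, x, y = heapq.heappop(queue)
        if (x, y) == (width - 1, height - 1):
            return cost
        if cost > best[(x, y)]:
            continue  # stale queue entry
        for nx, ny in ((x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)):
            if 0 <= nx < width and 0 <= ny < height:
                new_cost = cost + cavern[ny][nx]
                if new_cost < best.get((nx, ny), float('inf')):
                    best[(nx, ny)] = new_cost
                    heapq.heappush(queue, (new_cost, nx, ny))
    return best.get((width - 1, height - 1))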
def repeat_input(cavern, times):
data = []
for y in range(0, len(cavern) * times):
data.append([])
for x in range(0, len(cavern[0]) * times):
value = cavern[(y % len(cavern))][(x % len(cavern[0]))]
value = (value + int(y / len(cavern)) + int(x / len(cavern[0])))
if value > 9:
value -= 9
data[y].append(value)
return data
# Very fast approach, but only considers moving down + right
def down_right_minimum(cavern):
min_cost = []
for y in range(0, len(cavern)):
min_cost.append([0] * len(cavern[0]))
for y in range(0, len(cavern)):
cost_down = get(min_cost, 0, y - 1) + get(cavern, 0, y)
min_cost[y][0] = cost_down
for x in range(0, len(cavern[0])):
cost_right = get(min_cost, x - 1, 0) + get(cavern, x, 0)
min_cost[0][x] = cost_right
for y in range(1, len(cavern)):
for x in range(1, len(cavern[0])):
cost_down = get(min_cost, x, y - 1) + get(cavern, x, y)
cost_right = get(min_cost, x - 1, y) + get(cavern, x, y)
min_cost[y][x] = min(cost_down, cost_right)
return min_cost[-1][-1] - cavern[0][0]
def get(data, x, y, default=0):
if x < 0 or y < 0:
return default
else:
return data[y][x]
def print_data(data):
for y in range(0, len(data)):
line = ''
for x in range(0, len(data[0])):
line += str(data[y][x])
# if data[y][x] < 10:
# line += '0' + str(data[y][x]) + '|'
# else:
# line += str(data[y][x]) + '|'
print(line)
# Steps in all directions, tries to smartly discard paths but is still very slow
def brute_force_all_paths(cavern):
next_paths = [Path([Point(0, 0)], cavern[0][0])]
# take path straight down and to right as first guess of minimum
min_cost = down_right_minimum(cavern) + cavern[0][0]
print(min_cost)
end_point = Point(len(cavern[0]) - 1, len(cavern) - 1)
for step in range(0, len(cavern) * len(cavern[0])):
print(step, '/', len(cavern) * len(cavern[0]))
paths = next_paths
next_paths_dict = {}
for path in paths:
next_points = []
if path.points[-1].x > 0:
left = Point(path.points[-1].x - 1, path.points[-1].y)
next_points.append(left)
if path.points[-1].x < len(cavern[0]) - 1:
right = Point(path.points[-1].x + 1, path.points[-1].y)
next_points.append(right)
if path.points[-1].y > 0:
up = Point(path.points[-1].x, path.points[-1].y - 1)
next_points.append(up)
if path.points[-1].y < len(cavern) - 1:
down = Point(path.points[-1].x, path.points[-1].y + 1)
next_points.append(down)
for point in next_points:
# Reached end point
if point == end_point:
new_points = path.points.copy()
new_points.append(point)
min_cost = min(path.cost + cavern[point.y][point.x], min_cost)
continue
# Don't allow loops
if point in path.points:
continue
new_points = path.points.copy()
new_points.append(point)
new_cost = path.cost + cavern[point.y][point.x]
# Stop processing path when cost higher than current min
if new_cost > min_cost:
continue
# Only consider path if cheaper than other path to same point
elif point in next_paths_dict:
cost = next_paths_dict[point].cost
if new_cost < cost:
next_paths_dict[point] = Path(new_points, new_cost)
else:
next_paths_dict[point] = Path(new_points, new_cost)
next_paths = next_paths_dict.values()
if len(next_paths) == 0:
print("done")
break
return min_cost - cavern[0][0]
def calculate_min_cost(cavern, paths_to_end):
costs = []
for path in paths_to_end:
cost = 0
for point in path.points:
cost += cavern[point.y][point.x]
costs.append(cost)
return min(costs)
def calculate_cost(cavern, points):
cost = 0
for point in points:
cost += cavern[point.y][point.x]
return cost
if __name__ == '__main__':
problem_b()
```
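The in-code comments above note that the exhaustive search is far too slow and that a shortest-path algorithm such as Dijkstra is needed. Below is a minimal sketch of that idea, not part of the original repository: it assumes the same `cavern` list-of-lists format produced by `load_input`/`repeat_input`, and the function name `dijkstra_min_cost` is chosen here for illustration.
```python
import heapq

def dijkstra_min_cost(cavern):
    """Minimum total risk from top-left to bottom-right, excluding the start cell's risk."""
    height, width = len(cavern), len(cavern[0])
    settled = [[False] * width for _ in range(height)]
    queue = [(0, 0, 0)]  # (accumulated cost, x, y)
    while queue:
        cost, x, y = heapq.heappop(queue)
        if settled[y][x]:
            continue  # a cheaper path to this cell was already processed
        settled[y][x] = True
        if (x, y) == (width - 1, height - 1):
            return cost
        for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            nx, ny = x + dx, y + dy
            if 0 <= nx < width and 0 <= ny < height and not settled[ny][nx]:
                heapq.heappush(queue, (cost + cavern[ny][nx], nx, ny))
    return None  # unreachable for a well-formed grid
```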
|
{
"source": "jeroenmoons/drl_banana",
"score": 3
}
|
#### File: drl_banana/agent/dqn.py
```python
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from agent.base import UnityAgent
from agent.estimate.neural import FullyConnectedNetwork
from agent.memory.buffer import ReplayBuffer
class DqnAgent(UnityAgent):
"""Chooses epsilon-greedy actions using a NN to estimate action values."""
# Default params
DEVICE_DEFAULT = 'cpu' # pytorch device
HIDDEN_LAYER_SIZES_DEFAULT = (50, 50) # default q network hidden layer sizes
REPLAY_BUFFER_SIZE_DEFAULT = 100000 # max nr of experiences in memory
ALPHA_DEFAULT = .1 # default learning rate
GAMMA_DEFAULT = .9 # default reward discount factor
EPSILON_DEFAULT = 1. # starting value for epsilon
EPSILON_DECAY_DEFAULT = .9999 # used to decay epsilon over time
EPSILON_MIN_DEFAULT = .005 # minimum value for decayed epsilon
LEARN_BATCH_SIZE_DEFAULT = 50 # batch size to use when learning from memory
def __init__(self, brain_name, state_size, action_size, params):
super().__init__(brain_name, state_size, action_size, params)
# pytorch device
self.device = params.get('device', self.DEVICE_DEFAULT)
# learning parameters
self.alpha = params.get('alpha', self.ALPHA_DEFAULT)
self.gamma = params.get('gamma', self.GAMMA_DEFAULT)
self.epsilon = params.get('epsilon', self.EPSILON_DEFAULT)
self.epsilon_decay = params.get('epsilon_decay', self.EPSILON_DECAY_DEFAULT)
self.epsilon_min = params.get('epsilon_min', self.EPSILON_MIN_DEFAULT)
self.learn_batch_size = params.get('learn_batch_size', self.LEARN_BATCH_SIZE_DEFAULT)
# memory
self.memory_size = params.get('memory_size', self.REPLAY_BUFFER_SIZE_DEFAULT)
self.memory = ReplayBuffer(action_size, self.memory_size)
# online and target Q-network models
self.hidden_layer_sizes = params.get('hidden_layer_sizes', self.HIDDEN_LAYER_SIZES_DEFAULT)
self.online_network = FullyConnectedNetwork(self.state_size, self.hidden_layer_sizes, self.action_size)
self.target_network = FullyConnectedNetwork(self.state_size, self.hidden_layer_sizes, self.action_size)
self.optimizer = optim.Adam(self.online_network.parameters(), lr=self.alpha)
def select_action(self, state):
"""
Selects an epsilon-greedy action from the action space, using the
online_network and target_network to estimate action values.
"""
# with probability epsilon, explore by choosing a random action
if self.training and np.random.rand() < self.epsilon:
return np.random.choice(self.action_size)
# else, use the online network to choose the action it currently estimates to be the best one
state_tensor = torch.from_numpy(state).float()
state_tensor = state_tensor.unsqueeze(0) # wrap state in extra array so vector becomes a (single state) batch
state_tensor = state_tensor.to(self.device) # move the tensor to the configured device (cpu or cuda/gpu)
self.online_network.eval() # switch to evaluation mode for more efficient evaluation of the state tensor
with torch.no_grad():
action_values = self.online_network(state_tensor)
self.online_network.train() # and back to training mode
best_action = torch.argmax(action_values.squeeze()).numpy().item(0) # pick action with highest Q value
return best_action
def step(self, state, action, result):
next_state = result.vector_observations[0]
reward = result.rewards[0]
done = result.local_done[0]
self.memory.add(state, action, reward, next_state, done)
experiences = self.memory.sample(self.learn_batch_size)
self.learn(experiences, self.gamma) # learn every step
self.epsilon = max(self.epsilon_min, self.epsilon * self.epsilon_decay) # decay epsilon up to minimum
return reward, done
def learn(self, experiences, gamma):
"""Performs gradient descent of the local network on the batch of experiences."""
# create pytorch tensors from the sampled experiences
states, actions, rewards, next_states, dones = self.tensorize_experiences(experiences)
# get estimated q values for next states from target_network
next_state_values = self.target_network(next_states) # get q for each action in next_states
next_targets = next_state_values.detach().max(1)[0].unsqueeze(1) # select maximum action value for each one
# calculate q values for current states: reward + (gamma * target value of next_state)
# multiplication by (1 - dones) sets next_state value to 0 if state was end of episode.
targets = rewards + (gamma * next_targets * (1 - dones))
# calculate expected q values for current state by evaluation through online_network
expecteds = self.online_network(states).gather(1, actions)
# calculate error between expected and target (= loss)
loss = F.mse_loss(expecteds, targets)
# minimize that loss to make the network perform better next time
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# update the target network's weights only slightly to stabilize training
# (instead of copying all weights every X episodes)
self.soft_update_target_network(self.online_network, self.target_network, 1e-3)
def tensorize_experiences(self, experiences):
"""Turns a set of experiences into separate tensors."""
states = torch.tensor(np.array([e.state for e in experiences])).float().to(self.device)
actions = torch.tensor(np.array([[e.action] for e in experiences])).long().to(self.device)
rewards = torch.tensor(np.array([[e.reward] for e in experiences])).float().to(self.device)
next_states = torch.tensor(np.array([e.next_state for e in experiences])).float().to(self.device)
dones = torch.tensor(np.array([[int(e.done)] for e in experiences]).astype(np.uint8)).float().to(self.device)
return states, actions, rewards, next_states, dones
def soft_update_target_network(self, source, target, tau):
"""Soft update target network weights from source."""
for target_w, source_w in zip(target.parameters(), source.parameters()):
# move target weights slightly closer to source weights.
target_w.data.copy_(tau * source_w.data + (1.0 - tau) * target_w.data)
def save_checkpoint(self, name='checkpoint'):
torch.save(self.online_network.state_dict(), 'saved_models/dqn_agent_{}.pth'.format(name))
def load_checkpoint(self, checkpoint):
weights = torch.load(checkpoint)
self.online_network.load_state_dict(weights)
self.target_network.load_state_dict(weights)
def get_params(self):
return {
'device': self.device,
'memory_size': self.memory_size,
'learn_batch_size': self.learn_batch_size,
'alpha': self.alpha,
'gamma': self.gamma,
'epsilon': self.epsilon,
'epsilon_decay': self.epsilon_decay,
'epsilon_min': self.epsilon_min,
'online_network': self.online_network,
'target_network': self.target_network
}
```
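The `soft_update_target_network` method above nudges each target-network weight a small fraction `tau` toward the corresponding online-network weight instead of copying everything at once. A minimal standalone sketch of the same idea follows; the two `nn.Linear` layers and the variable names are invented for the example and are not taken from the repository.
```python
import torch
import torch.nn as nn

def soft_update(source: nn.Module, target: nn.Module, tau: float) -> None:
    # Move every target parameter a fraction tau toward the source parameter.
    with torch.no_grad():
        for target_w, source_w in zip(target.parameters(), source.parameters()):
            target_w.copy_(tau * source_w + (1.0 - tau) * target_w)

online = nn.Linear(4, 2)
frozen = nn.Linear(4, 2)
soft_update(online, frozen, tau=1e-3)  # frozen weights drift only slightly per call
```
With `tau` this small, the target network changes slowly, which is what stabilizes the bootstrapped targets during training.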
#### File: jeroenmoons/drl_banana/train.py
```python
import config
import sys
import numpy as np
import matplotlib.pyplot as plt
from unityagents import UnityEnvironment
from agent.factory import AgentFactory
def train(env, agent):
"""
Performs the main training loop.
"""
max_score = 0
scores = []
scores_avg = []
iterations = 0
solved = False
print('Training agent.')
while iterations < config.MAX_ITERATIONS and not solved:
# show a progress indication
print('\repisode {}, max score so far is {}'.format(iterations, max_score), end='')
sys.stdout.flush()
iterations += 1
done = False
score = 0
env_info = env.reset(train_mode=True)[agent.brain_name] # reset the environment
episode_steps = 0
while not done and episode_steps < config.MAX_EPISODE_STEPS:
episode_steps += 1
state = env_info.vector_observations[0]
action = agent.select_action(state) # choose an action
env_info = env.step(action)[agent.brain_name] # execute that action
reward, done = agent.step(state, action, env_info) # give the agent the chance to learn from the results
score += reward # update score with the reward
scores.append(score) # keep track of the episode score
avg_score = np.mean(scores[-100:]) # calculate average score over the last 100 episodes
scores_avg.append(avg_score) # keep track of the average score
max_score = score if max_score < score else max_score # keep track of max score so far
# print periodic progress report
if iterations % 100 == 0:
print('\rIteration {} - avg score of {} over last 100 episodes'.format(iterations, avg_score))
agent.save_checkpoint(name='checkpoint')
# if the environment is solved, stop training
if not solved and avg_score > config.SOLVED_SCORE:
print('\rEnvironment solved in {} iterations with a score of {}'.format(iterations, avg_score))
solved = True
agent.save_checkpoint(name='solved')
print('Training ended with an avg score of {} over last 100 episodes'.format(scores_avg[-1]))
plot_scores(scores, scores_avg)
return scores
def plot_scores(scores, scores_avg):
"""Creates plots of score track record."""
# plot all scores
plt.plot(np.arange(len(scores)), scores)
plt.ylabel('Score')
plt.xlabel('Episode #')
plt.show()
# plot average scores
plt.plot(np.arange(len(scores_avg)), scores_avg)
plt.ylabel('Avg Score over last 100 eps')
plt.xlabel('Episode #')
plt.show()
if __name__ == '__main__':
"""
This shows an agent performing a single episode.
"""
print('training a new agent to master {}'.format(config.ENV_APP))
# Create the Unity environment
banana_env = UnityEnvironment(file_name=config.ENV_APP)
# Select the brain (= Unity ML agent) to work with and examine action space
brain_name = banana_env.brain_names[0]
brain = banana_env.brains[brain_name]
action_size = brain.vector_action_space_size
# Examine state space
initial_env_info = banana_env.reset(train_mode=True)[brain_name]
state_size = len(initial_env_info.vector_observations[0])
# Create a new DQN agent
agent_factory = AgentFactory()
an_agent = agent_factory.create_agent('dqn_new', brain_name, state_size, action_size)
an_agent.training = True # True is default, but just in case
print('Agent params: {}'.format(an_agent.get_params()))
# Train the agent
result = train(banana_env, an_agent)
# Close the environment, no longer needed
banana_env.close()
print("Max score: {}".format(np.array(result).max()))
```
|
{
"source": "jeroennijhuis/Energy",
"score": 3
}
|
#### File: Raspberry Pi/SmartMeter/EnergyData.py
```python
from EnergyTariff import EnergyTariff
class EnergyData:
ConsumedRate1 = -1
ConsumedRate2 = -1
ReturnedRate1 = -1
ReturnedRate2 = -1
Tariff = EnergyTariff.Unknown
Consumed = -1
Returned = -1
Gas = -1
def IsValid(self):
return self.ConsumedRate1 >= 0 \
and self.ConsumedRate2 >= 0 \
and self.ReturnedRate1 >= 0 \
and self.ReturnedRate2 >= 0 \
and self.Tariff != EnergyTariff.Unknown \
and self.Consumed >= 0 \
and self.Returned >= 0 \
and self.Gas >= 0
```
|
{
"source": "jeroenpeters1986/clockify-api-aclient",
"score": 3
}
|
#### File: clockify_api_client/models/project.py
```python
import logging
from urllib.parse import urlencode
from clockify_api_client.abstract_clockify import AbstractClockify
class Project(AbstractClockify):
def __init__(self, api_key, api_url):
super(Project, self).__init__(api_key=api_key, api_url=api_url)
def get_projects(self, workspace_id, params=None):
"""Returns projects from given workspace with applied params if provided.
:param workspace_id Id of workspace.
:param params Dictionary with request parameters.
:return List of projects.
"""
try:
if params:
url_params = urlencode(params, doseq=True)
url = self.base_url + '/workspaces/' + workspace_id + '/projects?' + url_params
else:
url = self.base_url + '/workspaces/' + workspace_id + '/projects/'
return self.get(url)
except Exception as e:
logging.error("API error: {0}".format(e))
raise e
def add_project(self, workspace_id, project_name, client_id, billable=False, public=False, color="#16407B"):
"""Add new project into workspace.
:param workspace_id Id of workspace.
:param project_name Name of new project.
:param client_id Id of client.
:param billable Bool flag, whether the project is billable.
:param public Bool flag, whether the project is public.
:param color Hex color code for the project.
:return Dictionary representation of new project.
"""
try:
url = self.base_url + '/workspaces/' + workspace_id + '/projects/'
data = {
'name': project_name,
"clientId": client_id,
"isPublic": "true" if public else "false",
"billable": billable
}
return self.post(url, data)
except Exception as e:
logging.error("API error: {0}".format(e))
raise e
```
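A short usage sketch for the class above; the API key, base URL, and workspace id are placeholders, and the `page-size` entry is only an example of the kind of dictionary `get_projects` forwards as query-string parameters.
```python
# Placeholder credentials and ids for illustration only.
project_api = Project(api_key="YOUR_API_KEY", api_url="api.clockify.me/api/v1")
projects = project_api.get_projects("WORKSPACE_ID", params={"page-size": 50})
new_project = project_api.add_project("WORKSPACE_ID", "Internal tooling", "CLIENT_ID", billable=True)
```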
|
{
"source": "JeroenProoth/BDO-MARKET-API",
"score": 3
}
|
#### File: BDO-MARKET-API/market_api/client.py
```python
import requests
class Client():
base_url = 'https://marketweb-eu.blackdesertonline.com/'
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36',
}
cookies = {
'ASP.NET_SessionId': None,
'__RequestVerificationToken': None,
}
data = {
'__RequestVerificationToken': None,
'keyType' : '0',
'subKey' : '0', #Grade of the item. 0 is +0, 1 is +1 (or Pri for accessories)
'isUp' : 'true',
'mainKey' : None,
}
def __init__(self, session_id, cookie_token, form_token):
self.http = requests.session()
self.cookies['ASP.NET_SessionId'] = session_id
self.cookies['__RequestVerificationToken'] = cookie_token
self.data['__RequestVerificationToken'] = form_token
def set_item(self, item_id):
self.data['mainKey'] = item_id
def set_main_category(self, category_id):
self.data['mainCategory'] = category_id
def set_sub_category(self, category_id):
self.data['subCategory'] = category_id
def connect(self, method = None):
try:
request = self.http.post(
self.base_url + str(method),
cookies = self.cookies,
headers = self.headers,
data = self.data
)
except requests.exceptions.ConnectionError:
print('Connection Refused')
return
if request.status_code == 200:
if request.text:
data = request.json()
error = data.get('error')
if not error:
return data
else:
print('Bad Request for url: {}.'.format(request.url))
```
#### File: JeroenProoth/BDO-MARKET-API/market_scrubber.py
```python
import time
import pandas as pd
from market_api.methods import Methods
from masteries.cooking import CookingMastery
from webscraper import WebScraper
class MarketScrubber():
def __init__(self, session_id, cookie_token, form_token, item_dataframe, material_groups):
self.methods = Methods(session_id, cookie_token, form_token)
self.item_dataframe = item_dataframe
self.material_groups = material_groups
def get_item_id(self, item_name):
"""Returns item_id given an item_id."""
return self.item_dataframe.loc[(self.item_dataframe.name).apply(lambda x : x.casefold()) == str(item_name).casefold()].mainKey.values[0]
def get_item_name(self, item_id):
"""Returns item_name given an item_id."""
return self.item_dataframe.loc[self.item_dataframe.mainKey == int(item_id)].name.values[0]
def get_market_depth(self, item):
"""Returns a pandas DataFrame sorted from low -> high.
Market depth is given by the buy- and sellCounts.
returns a DataFrame
"""
if not item.isdigit():
item = self.get_item_id(item)
data = self.methods.get_item_sell_buy_info(item)['marketConditionList']
market_conditions = pd.DataFrame.from_dict(data)[['pricePerOne', 'buyCount', 'sellCount']]
return market_conditions
def get_items_sold(self, item, time_format = 'unix'):
"""Returns the total amount of items sold for a given item.
returns a tuple with (time_data (unix or local), total items sold)
time_format: 'unix' (default) or 'local'.
"""
if not item.isdigit():
item = self.get_item_id(item)
time_data = time.time()
if time_format == 'local':
time_data = time.strftime("%d:%m:%y %H:%M:%S", time.localtime(time_data))
data = self.methods.get_world_market_sub_list(item)['detailList']
items_sold = data[0]['totalTradeCount']
return (time_data, items_sold)
def get_item_price(self, item):
"""Returns the price of an item.
"""
if not item.isdigit():
item = self.get_item_id(item)
data = self.methods.get_world_market_sub_list(item)['detailList']
item_value = data[0]['pricePerOne']
return item_value
def calculate_recipe_profitability(self, recipe, mastery):
"""Returns the input and output value of a recipe based on mastery level.
Used formulas provided by https://docs.google.com/spreadsheets/d/1D7mFcXYFm4BUS_MKxTvgBY2lXkGtwWqn2AW91ntUvzE/edit#gid=1519713712
returns a tuple (input value, output value)
"""
cooking_master = CookingMastery(mastery)
max_proc_chance = cooking_master.regular_rare_max_chance()
rare_proc_chance = cooking_master.rare_proc_chance()
outputs = recipe['output']
inputs = recipe['input']
input_value = 0
output_value = 0
for item, amount in inputs.items():
input_value += float(amount) * self.get_item_price(item)
for item, rarity in outputs.items():
if rarity == 'normal':
items_created = 2.5 + 1.5 * max_proc_chance
output_value += items_created * self.get_item_price(item)
else:
items_created = (1.5 + 0.5 * max_proc_chance) * (0.2 + rare_proc_chance)
output_value += items_created * self.get_item_price(item)
return (input_value, output_value)
```
#### File: JeroenProoth/BDO-MARKET-API/webscraper.py
```python
import requests
import time
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class WebScraper():
def __init__(self, item_dataframe):
self.item_dataframe = item_dataframe
def get_item_id(self, item_name):
"""Returns item_id given an item_id."""
return self.item_dataframe.loc[(self.item_dataframe.name).apply(lambda x : x.casefold()) == str(item_name).casefold()].mainKey.values[0]
def get_item_name(self, item_id):
"""Returns item_name given an item_id."""
return self.item_dataframe.loc[self.item_dataframe.mainKey == int(item_id)].name.values[0]
def get_recipe(self, item):
if not item.isdigit():
item = self.get_item_id(item)
URL = 'https://bdocodex.com/us/item/{}/'.format(item)
page = requests.get(URL)
driver = webdriver.Chrome()
driver.get(URL)
# WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.CLASS_NAME, "dt-reward")))
materials = []
total_time = 0
recipe = {self.get_item_name(item) : {'output' : {}, 'input' : {}}}
""" Sometimes it doesn't work, so I did this.
This work quite consistently within 2 seconds, so don't change it.
I tried all kinds of shit with WebDriverWait, I couldn't make it work.
"""
while not materials:
page = BeautifulSoup(driver.page_source, 'lxml')
product = page.find(id='tabs-productofrecipe')
materials = product.find_all("td", {"class": "dt-reward"}, limit = 2)
time.sleep(1)
total_time += 1
if total_time > 60:
break
inputs = materials[0]
outputs = materials[1]
lines = inputs.find_all("a", class_ = 'qtooltip')
for line in lines:
amount = line.find("div", class_='quantity_small nowrap')
for attr in str(line).split(" "):
if 'data-id' in attr:
item_id = attr.replace('data-id=', '').replace('item--', '').replace('"', '').replace('--', ' ')
if amount is not None:
amount = int(amount.text)
else:
amount = 1
recipe[self.get_item_name(item)]['input'][item_id] = amount
lines = outputs.find_all("a", class_ = 'qtooltip')
for line in lines:
amount = line.find("div", class_='quantity_small nowrap')
for attr in str(line).split(" "):
if 'data-id' in attr:
item_id = attr.replace('data-id=', '').replace('item--', '').replace('"', '').replace('--', ' ')
if amount != None:
amount = int(amount.text)
else:
amount = 1
if len(recipe[self.get_item_name(item)]['output']) < 1:
recipe[self.get_item_name(item)]['output'][item_id] = 'normal'
else:
recipe[self.get_item_name(item)]['output'][item_id] = 'rare'
return recipe
def get_material_group(self, material_group):
"""There are a total of 56 material groups"""
try:
group = material_group.split(" ")[1]
group = {material_group : {} }
page = requests.get('https://bdocodex.com/us/materialgroup/{}/'.format(group))
soup = BeautifulSoup(page.content, 'html.parser')
insider = soup.find("div", class_='insider')
for line in insider:
for attr in str(line).split(" "):
if 'data-id' in attr:
item_id = attr.replace('data-id=', '').replace('item--', '').replace('"', '').replace('--', ' ')
group[material_group][self.get_item_name(item_id)] = item_id
return group
except (IndexError, TypeError):
return None
```
|
{
"source": "jeroenschoonderbeek/Clappform-Python-Connector",
"score": 3
}
|
#### File: Clappform-Python-Connector/Clappform/collection.py
```python
from .settings import settings
from .dataFrame import _DataFrame
from .item import _Item
from .auth import Auth
import requests
class _Collection:
app_id = None
id = None
def __init__(self, app, collection = None):
self.app_id = app
self.id = collection
def DataFrame(self):
return _DataFrame(self.app_id, self.id)
def Item(self, item = None):
return _Item(self.app_id, self.id, item)
def ReadOne(self, extended = False):
if not Auth.tokenValid():
Auth.refreshToken()
extended = str(extended).lower()
response = requests.get(settings.baseURL + 'api/metric/' + self.app_id + '/' + self.id + '?extended=' + extended, headers={'Authorization': 'Bearer ' + settings.token})
if response.json()["code"] is 200:
return response.json()["data"]
else:
raise Exception(response.json()["message"])
def Create(self, slug, name, encryption):
if not Auth.tokenValid():
Auth.refreshToken()
response = requests.post(settings.baseURL + 'api/metric/' + self.app_id, json={
'slug': slug,
'name': name,
'encryption': encryption
}, headers={
'Authorization': 'Bearer ' + settings.token
})
if response.json()["code"] is 200:
return _Collection(self.app_id, id)
else:
raise Exception(response.json()["message"])
def Update(self, slug = None, name = None, encryption = None):
if not Auth.tokenValid():
Auth.refreshToken()
properties = {}
if name is not None:
properties["name"] = name
if slug is not None:
properties["slug"] = slug
if encryption is not None:
properties["encryption"] = encryption
response = requests.post(settings.baseURL + 'api/metric/' + self.app_id + '/' + self.id, json=properties, headers={
'Authorization': 'Bearer ' + settings.token
})
if response.json()["code"] is 200:
return _Collection(self.app_id, id)
else:
raise Exception(response.json()["message"])
def Delete(self):
if not Auth.tokenValid():
Auth.refreshToken()
response = requests.delete(settings.baseURL + 'api/metric/' + self.app_id + '/' + self.id, headers={'Authorization': 'Bearer ' + settings.token})
if response.json()["code"] is 200:
return True
else:
raise Exception(response.json()["message"])
def Empty(self):
if not Auth.tokenValid():
Auth.refreshToken()
response = requests.delete(settings.baseURL + 'api/metric/' + self.app_id + '/' + self.id + '/dataframe', headers={'Authorization': 'Bearer ' + settings.token})
if response.json()["code"] is 200:
return True
else:
raise Exception(response.json()["message"])
```
#### File: Clappform-Python-Connector/Clappform/dataFrame.py
```python
from .settings import settings
import pandas as pd
import json
import requests
import math
from .auth import Auth
class _DataFrame:
app_id = None
collection_id = None
def __init__(self, app, collection):
self.app_id = app
self.collection_id = collection
def Read(self):
if not Auth.tokenValid():
Auth.refreshToken()
response = requests.get(settings.baseURL + 'api/metric/' + self.app_id + '/' + self.collection_id, headers={
'Authorization': 'Bearer ' + settings.token
})
data = []
loopCount = math.ceil(response.json()["data"]["items"] / 500)
for x in range(0, loopCount):
response = requests.get(settings.baseURL + 'api/metric/' + self.app_id + '/' + self.collection_id + '?extended=true&offset=' + str(x * 500), headers={
'Authorization': 'Bearer ' + settings.token
})
for item in response.json()["data"]["items"]:
data.append(item["data"])
return pd.DataFrame(data)
def Synchronize(self, dataframe):
if not Auth.tokenValid():
Auth.refreshToken()
response = requests.put(settings.baseURL + 'api/metric/' + self.app_id + '/' + self.collection_id + '/dataframe', json=json.loads(dataframe.to_json(orient='index')), headers={
'Authorization': 'Bearer ' + settings.token
})
if response.json()["code"] is 200:
return True
else:
raise Exception(response.json()["message"])
def Append(self, dataframe):
if not Auth.tokenValid():
Auth.refreshToken()
dataframe.reset_index(inplace=True, drop=True)
response = requests.get(settings.baseURL + 'api/metric/' + self.app_id + '/' + self.collection_id, headers={
'Authorization': 'Bearer ' + settings.token
})
if 'index' in dataframe:
dataframe = dataframe.drop(columns=["index"])
offset = response.json()["data"]["items"]
count = 0
for x in range(0 + offset, len(dataframe.index) + offset):
if (count + 1) % 100 == 0:
portion = dataframe.iloc[x - 99 - offset:x + 1 - offset]
portion.reset_index(inplace=True, drop=True)
if 'index' in portion:
portion = portion.drop(columns=["index"])
portion.index += offset + count - 99
items = json.loads(portion.to_json(orient='index'))
response = requests.post(settings.baseURL + 'api/metric/' + self.app_id + '/' + self.collection_id + '/dataframe', json=items, headers={
'Authorization': 'Bearer ' + settings.token
})
elif len(dataframe.index) + offset == x + 1:
portion = dataframe.tail(len(dataframe.index) - int(math.floor(len(dataframe.index) / 100.0)) * 100)
portion.reset_index(inplace=True, drop=True)
if 'index' in portion:
portion = portion.drop(columns=["index"])
portion.index += offset + count - 99
items = json.loads(portion.to_json(orient='index'))
response = requests.post(settings.baseURL + 'api/metric/' + self.app_id + '/' + self.collection_id + '/dataframe', json=items, headers={
'Authorization': 'Bearer ' + settings.token
})
count += 1
return True
```
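A brief usage sketch for the two write paths above. In normal use this class is obtained through `_Collection.DataFrame()` (shown earlier); it is constructed directly here only to keep the example short, and the app/collection identifiers are placeholders that assume authentication has already been configured via the package's `Auth`/`settings` machinery.
```python
import pandas as pd

measurements = pd.DataFrame({"sensor": ["a", "b"], "value": [1.2, 3.4]})
frame = _DataFrame("APP_ID", "COLLECTION_ID")
frame.Synchronize(measurements)  # replace the collection contents with this dataframe
frame.Append(measurements)       # append rows, posted in chunks of at most 100 items
```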
|
{
"source": "JeroenSlobbe/Scripts",
"score": 4
}
|
#### File: Scripts/Crypto CTF challenges/XOR challenge.py
```python
import os
def encrypt(key: bytes, data: bytes) -> bytes:
cipherText = b''
for i in range(len(data)):
cipherText += bytes([data[i] ^ key[i % len(key)]])
return cipherText
def decrypt(key: bytes, data: bytes) -> bytes:
plaintext = b''
for i in range(len(data)):
plaintext += bytes([data[i] ^ key[i % len(key)]])
return plaintext
def generateKey(length) -> bytes:
returnKey = os.urandom(length)
return returnKey
def testWalk():
print("\nGenerating random key with keylength 4:")
key = generateKey(4)
print(key)
print("\nEncrypting message: 'testwalk':")
input = b'testwalk'
cipher = encrypt(key, input)
print(cipher)
plaintext = decrypt(key, cipher)
print("\nDecrypting cipher:")
print(plaintext)
def main():
# Because the key is re-used time after time, and we know the first 4 bytes due to default flag format, we can recover the key and decrypt the whole message
# Lesson learned: in Python 3, a b'' literal defines a byte string, and bytes.fromhex builds one from a hex string
key = b'HTB{'
input = bytes.fromhex("134af6e1297bc4a96f6a87fe046684e8047084ee046d84c5282dd7ef292dc9")
# Using the raw byte literal below would also work, but bytes.fromhex looks much cleaner
# Input = b'\x13J\xf6\xe1){\xc4\xa9oj\x87\xfe\x04f\x84\xe8\x04p\x84\xee\x04m\x84\xc5(-\xd7\xef)-\xc9'
realKey = decrypt(key,input)[0:4]
print(decrypt(realKey,input))
main()
```
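The key-recovery trick described in the comments of `main()` works because XOR is its own inverse: XORing the ciphertext with a known plaintext prefix of key length reveals the key bytes. A minimal self-contained demonstration of that property follows; the key and message are made up for the example.
```python
def xor_bytes(a: bytes, b: bytes) -> bytes:
    return bytes(x ^ y for x, y in zip(a, b))

key = b"K3Y!"
message = b"HTB{example_flag}"
cipher = bytes(message[i] ^ key[i % len(key)] for i in range(len(message)))

recovered_key = xor_bytes(cipher[:4], b"HTB{")  # known 4-byte plaintext prefix
assert recovered_key == key
print(xor_bytes(cipher, recovered_key * (len(cipher) // 4 + 1)))  # full plaintext back
```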
#### File: Scripts/CSV crawlers and parsers/ICS-CERT2CSV.py
```python
import re
from urllib.request import urlopen,Request
def getExploitability(input,link):
result = "unkown"
# List that indicates unknown (not to be coded as this is default
# Public exploits may exist that could target this vulnerability.
# List of explicit mentioning of no exploit code
ne1 = "No known public exploits specifically target this vulnerability"
ne2 = "No known public exploits specifically target these vulnerabilities"
ne4 = "No known exploits specifically target this vulnerability."
ne5 = "No known exploits specifically target these vulnerable components."
ne7 = "No known exploits specifically target these vulnerabilities."
ne8 = "No known public exploits specifically target the other"
ne11 = "No known public exploits have targeted this vulnerability."
ne12 = "No known exploits are specifically targeting this vulnerability."
ne16 = "No known public exploits exist that target these vulnerabilities."
ne20 = "No known publicly available exploit exists"
ne23 = "No known public exploits specifically target this vulnerability"
ne25 = "No publicly available exploits are known to specifically target this vulnerability."
ne26 = "No known public exploit specifically targets this vulnerability."
ne27 = "No known public exploits specifically target these products"
ne28 = "No known public exploits specifically target this vulnerability "
ne31 = "No known public exploits exist that target this vulnerability."
ne10 = "No publicly available exploit is known to exist."
ne3 = "No publicly available exploits are known to exist for this vulnerability."
ne13 = "No publicly known exploits specifically target these vulnerabilities"
ne18 = "No publicly available exploit code is known to exist that specifically targets this vulnerability."
ne24 = "No publicly known exploits specifically target this vulnerability."
ne32 = "No publicly available exploits specifically targeting these vulnerabilities are known to exist."
ne15 = "No exploits are known specifically to target this vulnerability."
ne30 = "No exploits are known that target this vulnerability."
ne9 = "No exploits are known that specifically target this vulnerability"
ne17 = "Exploits that target these vulnerabilities are not publicly available."
ne19 = "Exploits that target this vulnerability are not known to be publicly available."
ne6 = "There are currently no publicly known exploits specifically targeting this vulnerability."
ne29 = "There are currently no known exploits specifically targeting these vulnerabilities."
ne21 = "There are currently no known exploits specifically targeting this vulnerability."
ne22 = "ICS-CERT is unaware of any exploits that target this vulnerability."
ne14 = "Currently, no known exploits are specifically targeting this vulnerability."
# List of phrases explicitly stating that exploit code exists
e1 = "Public exploits are known to target this vulnerability."
e11 = "Public exploits are known to target these vulnerabilities."
e6 = "public exploits are available"
e12 = "Public exploits are available."
e8 = "publicly available exploit code is known to exist that targets these vulnerabilities."
e17 = "Public exploits are known to exist that target these vulnerabilities."
e27 = "Public exploits are known that target these vulnerabilities."
e29 = "Public exploits are known that specifically target this vulnerability."
e7 = "Exploits that target this vulnerability are known to be publicly available."
e2 = "Exploits that target these vulnerabilities are publicly available."
e10 = "Exploits that target this vulnerability are publicly available."
e4 = "Exploit code is publicly available for each of the vulnerabilities"
e5 = "Exploit code is publicly available for these vulnerabilities."
e15 = "Exploits that target these vulnerabilities exist and are publicly available."
e16 = "Exploits that target some of these vulnerabilities are known to be publicly available."
e18 = "Exploit code specifically targeting this vulnerability has been released"
e22 = "Exploits that target some of these vulnerabilities are publicly availabl"
e24 = "Exploits that target some vulnerabilities are publicly available."
e28 = "Exploit code for this vulnerability is publicly available"
e30 = "Exploit code for this vulnerability has been recently published."
e3 = "An exploit that targets one of these vulnerabilities is publicly available"
e9 = "An exploit of this vulnerability has been posted publicly."
e13 = "Publicly released PoC code exists for these vulnerabilities."
e14 = "Public exploit(s) are known to target these vulnerabilities."
e19 = "An exploit targeting this vulnerability is publicly available."
e20 = "This exploit is publicly known and available."
e21 = "An exploit for this vulnerability is publicly available."
e23 = "Publicly available exploits are known to specifically target vulnerabilities"
e25 = "Known exploits are now targeting this vulnerability."
e26 = "The researcher has publicly released exploits that specifically target these vulnerabilities."
# List of phrases where an exploit was demonstrated but not disclosed
nde1 = "Exploitation of vulnerabilities has been publicly demonstrated; however, exploit code is not publicly available."
nde2 = "General exploits are publicly available that utilize this attack vector"
nde3 = "No known public exploits specifically target this vulnerability, but information regarding this vulnerability has been publicly disclosed."
nde4 = "No exploit code is known to exist beyond the test code developed by the researcher"
nde5 = "Detailed vulnerability information is publicly available that could be used to develop an exploit that targets these vulnerabilities."
nde6 = "Detailed vulnerability information is publicly available that could be used to develop an exploit that targets this vulnerability."
nde7 = "Proof-of-concept code is expected to be made public by the researcher."
nde8 = "No known public exploits specifically target this vulnerability; however, common techniques may be used to exploit."
nde9 = "Exploits that target these vulnerabilities are potentially available"
nde10 = "Public exploits that target these vulnerabilities may exist."
# Metasploit / easy tool available
ae1 = "A Metasploit module is publicly available."
ae2 = "Tools are publicly available that aid in exploiting this cross-site scripting vulnerability."
ae3 = "Malware and public exploits are known to target this vulnerability."
ae4 = "Tools are publicly available that could aid in exploiting this"
if input.count(ne32) + input.count(ne31) + input.count(ne30) + input.count(ne29) + input.count(ne28) + input.count(ne27) + input.count(ne26) + input.count(ne25) + input.count(ne24) + input.count(ne23) + input.count(ne22) + input.count(ne21) + input.count(ne20) + input.count(ne19) + input.count(ne18) + input.count(ne17) + input.count(ne16) + input.count(ne15) + input.count(ne14) + input.count(ne13) + input.count(ne12) + input.count(ne11) + input.count(ne10) + input.count(ne9) + input.count(ne8) + input.count(ne7) + input.count(ne6) + input.count(ne1) + input.count(ne2) + input.count(ne3) + input.count(ne4) + input.count(ne5) > 0:
result = "no"
elif input.count(ae4) + input.count(ae3) + input.count(ae2) + input.count(ae1):
result="yes, autoexploit"
elif input.count(e30) + input.count(e29) + input.count(e28) + input.count(e27) + input.count(e26) + input.count(e25) + input.count(e24) + input.count(e23) + input.count(e22) + input.count(e21) + input.count(e20) + input.count(e19) + input.count(e18) + input.count(e17) + input.count(e16) + input.count(e15) + input.count(e14) + input.count(e13) + input.count(e12) + input.count(e1) + input.count(e2) + input.count(e3) + input.count(e4) + input.count(e5) + input.count(e6) + input.count(e7) + input.count(e8) + input.count(e9) + input.count(e10) + input.count(e11) > 0:
result = "yes"
elif input.count(nde10) + input.count(nde9) + input.count(nde8) + input.count(nde7) + input.count(nde6) + input.count(nde5) + input.count(nde4) + input.count(nde3) + input.count(nde2) + input.count(nde1) > 0:
result = "Partialy"
else:
# print(link) #DEBUG
result = "unkown"
return result
def getCVSS(input,link):
result = ""
if(len(re.findall("CVSS",input, re.IGNORECASE)) > 0):
CVSSOption1 = "CVSS V2 base score of ([0-9]*\.?[0-9])"
CVSSOption2 = "CVSS v3 ([0-9]*\.?[0-9])"
CVSSOption3 = "CVSS v3 base score of ([0-9]*\.?[0-9])"
CVSSregex = CVSSOption1 + "|" + CVSSOption2 + "|" + CVSSOption3
CVSS = re.findall(CVSSregex,input, re.IGNORECASE)
for item in CVSS:
result = max(item)
else:
result = "unspecified"
#print("No CVSS specified for: " + link) #DEBUG
return result
def getAffectedVersions(input,link):
result = ""
affectedVersions = ""
# Versions: based on the website structure, products are in list items after an h2 header. The first and last entries contain generic content (so let's remove them). After that, clean up the data.
rav1 = "<h2>Affected Products</h2>(.*?)<h2"
rav2 = "AFFECTED PRODUCTS</h3>(.*?)<h3"
rav3 = "<h2>AFFECTED PRODUCTS</h2>(.*?)<h2"
rav = rav1 + "|" + rav2 + "|" + rav3
affectedVersionsList = re.findall(rav, input)
for item in affectedVersionsList:
if(len(re.findall("<li>",max(item), re.IGNORECASE)) > 0):
affectedVersions = max(item).split("<li>")[1:]
for itemAV in affectedVersions:
result = result + itemAV.replace('</li>','').replace('</ul>','') + ";"
elif(len(re.findall("<li class=\"BodyTextBlack",max(item), re.IGNORECASE)) > 0):
affectedVersions = max(item).split("<li")[1:]
for itemAV in affectedVersions:
result = result + itemAV.replace('</li>','').replace('</ul>','') + ";"
elif(len(re.findall("<li class=\"MsoListBullet",max(item), re.IGNORECASE)) > 0):
affectedVersions = max(item).split("<li")[1:]
for itemAV in affectedVersions:
result = result + itemAV.replace('</li>','').replace('</ul>','').replace(' class="MsoListBullet">','') + ";"
elif(len(re.findall("<li class=\"margin-left: 40px;",max(item), re.IGNORECASE)) > 0):
affectedVersions = max(item).split("<li")[1:]
for itemAV in affectedVersions:
result = result + itemAV.replace('</li>','').replace('</ul>','').replace(' style="margin-left: 40px;">','') + ";"
elif(len(re.findall("<p>(.*?)</p>",max(item), re.IGNORECASE)) > 0):
result = re.findall("<p>(.*?)</p>",max(item), re.IGNORECASE)[0]
# Improve data quality
g1 = 'class="BodyTextBlack" style="margin: 6pt 0in;">'
g2 = '<ul style="margin-left: 40px;">'
g3 = "</p>"
g4 = "<p>"
g5 = "<ul>"
g6 = '<font face="Times New Roman"><font face="Times New Roman">'
g7 = "</font></font>;"
g8 = '<p class="red_title">>'
g9 = '<h4>'
g10 = "</h4>"
g11 = '<p class="red_title">><strong>3</strong><strong> --------</strong>From CIMPLICITY 6.1 forward, users have been advised that S90 drivers were no longer supported and an alternate tool was provided. CIMPLICITY 9.5 removed the drivers from the product.<p class="red_title"><strong>--------- End Update A Part 1 of 3 ----------</strong>'
g12 = '<ul style="list-style-type:circle">'
g17 = '<ul style="list-style-type:circle;">'
g13 = '<h3 style="color:red;">></h3>'
g14 = '<div class="red_title"><strong>--------- End Update C Part 1 of 2 ---------</strong></div>'
g15 = '<h3 class="red_title">></h3>'
g16 = '<div class="red_title"><strong>--------- End Update A Part 2Â of 4Â ---------</strong></div>'
g19 = '<div class="red_title"><strong>--------- End Update A Part 3 of 5 --------</strong></div>'
g18 = '<h3 style="color: red;">></h3>'
g20 = '<h3 style="color:red;">></h3>'
try:
start = 0
stop = 0
if(len(re.findall("<strong>",max(item),re.IGNORECASE)) > 0):
start = result.find('<strong>')
stop = result.find('</strong>') + 8
result = result[:start] + result[stop:]
if(len(re.findall("<em>",max(item),re.IGNORECASE)) > 0):
start = result.find('<em>')
stop = result.find('</em>') + 5
result = result[:start] + result[stop:]
if(len(re.findall('<div class="red_title">',max(item),re.IGNORECASE)) > 0):
start = result.find('<div class="red_title">')
stop = result.find('</div>') + 6
result = result[:start] + result[stop:]
result = result.replace(g20,'').replace(g19,'').replace(g18,'').replace(g17,'').replace(g16,'').replace(g15,'').replace(g14,'').replace(g1,'').replace(g2,'').replace(g3,'').replace("<br />",';').replace(g4,'').replace(g5,'').replace(g6,'').replace(g7,'').replace(g8,'').replace(g9,'').replace(g10,'').replace(g11,'').replace(g12,'').replace(g13,'')
except:
print(link)
pass
return(result)
def getVulType(input,link):
result = ""
# Vuln types are based on CWE definitions: https://cwe.mitre.org/data/definitions/1008.html
mitrefile = open("mitredefinitions.source",'r')
rv = ""
for line in mitrefile:
rv = rv + re.escape(line.split(' - (')[0]) + "|"
vulnTypes = re.findall(rv, input, re.IGNORECASE)
tmpArray = []
for v in vulnTypes:
if (v.strip() and (v.lower() not in tmpArray)):
tmpArray.append(v.lower())
result = result + v + ";"
return(result)
def mainProgram():
textfile = open("data.txt", 'r')
crawler = {'User-Agent': "ICS-Info-Crawler"}
#fullList = urlopen(Request(url="https://ics-cert.us-cert.gov/advisories-by-vendor", headers=crawler)).read().decode('ISO-8859-1')
#vendorChunk = str(((fullList.split('<div class="view-content">')[1]).split('</div></section>')[0]).encode("utf-8","ignore"))
staticURL = "https://www.us-cert.gov/ics/advisories-by-vendor?page="
currentVendor = ""
advisories = ""
link = ""
product = ""
CVSS = ""
exploitability = ""
versions = ""
vultypes = ""
result = ""
for x in range(0, 12):
tmpUrl = staticURL + str(x)
tmpList = urlopen(Request(url=tmpUrl, headers=crawler)).read().decode('ISO-8859-1')
tmpVendors = tmpList.split('<div class="view-content">')[1].split('<nav class="pager-nav text-center"')[0].replace('<div class="item-list">','').split('<h3>')
for tmpVendor in tmpVendors:
currentVendor = tmpVendor.split("</h3>")[0]
advisories = re.findall("<a href=\"(.*)</a>", tmpVendor)
for advisory in advisories:
tmpLinkProduct = "https://www.us-cert.gov/" + advisory.replace('" hreflang="en','')
link = tmpLinkProduct.split(">")[0].replace('"','')
product = tmpLinkProduct.split(">")[1]
vulnerabilityDetails = urlopen(Request(url=link, headers=crawler)).read().decode('ISO-8859-1')
CVSS = getCVSS(vulnerabilityDetails, link)
exploitability = getExploitability(vulnerabilityDetails, link)
versions = getAffectedVersions(vulnerabilityDetails, link)
vultypes = getVulType(vulnerabilityDetails,link)
result = str(currentVendor).replace(',','') + "," + str(product).replace(',','') + "," + str(link).replace(',','') + "," + str(CVSS).replace(',','') + "," + str(exploitability).replace(',','') + "," + str(versions).replace(',','') + "," + str(vultypes).replace(',','')
print(result.encode("utf-8","ignore"))
# Main program
print("Vendor, product, advisoryLink,CVSS, public exploit, affected versions, vulnerability type")
mainProgram()
```
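A note on the regex handling in `getCVSS` above: because the pattern contains several capture groups joined with `|`, `re.findall` returns one tuple per match with empty strings for the groups that did not participate, and `max(item)` simply picks the non-empty capture. A tiny illustration of that behaviour, with invented advisory text:
```python
import re

text = "... a CVSS v3 base score of 7.5 has been calculated ..."
pattern = r"CVSS V2 base score of ([0-9]*\.?[0-9])|CVSS v3 base score of ([0-9]*\.?[0-9])"
for groups in re.findall(pattern, text, re.IGNORECASE):
    print(max(groups))  # the empty group sorts below '7.5', so this prints '7.5'
```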
|
{
"source": "jeroenstalenburg/judgment_aggregation",
"score": 3
}
|
#### File: data/constraints/CNF.py
```python
from .Constraint import Constraint
# import sympy as sp
import pycosat as ps
import shlex
class CNF(Constraint):
def reset(self):
self.lines = 0
self.clauses_loaded = 0
self.var_amount = 0
self.clause_amount = 0
self.clauses = []
# self.boolean_expression = sp.true
self.boolean_vars = []
self.p_initialised = False
def __str__(self):
return ("JA CNF constraint object {\n Clauses: %s\n}" % self.clauses)
def initialise_data(self, var_amount, clause_amount):
"""Initialise the data for the class for future use"""
self.clauses_loaded = 0
self.var_amount = var_amount
self.clause_amount = clause_amount
self.clauses = [[]] * clause_amount
# self.boolean_expression = sp.true
# self.boolean_vars = [var(str(i + 1)) for i in range(var_amount)]
self.p_initialised = True
def load_lines(self, iterable):
"""Load the contents of a iterable of strings, such as a file or
list of strings. The strings need to be in the valid format
args:
iterable: the iterable object with the correctly formatted lines
path: is used to resolve the location of files which may be needed
by the scenario."""
self.lines = 0
for line in iterable:
self.lines += 1
contents = shlex.split(line.replace('\n', ''))
if contents == []:
continue
if contents[0] == 'p':
self.load_p_line(*contents[1:])
else:
self.load_clause_line(*contents)
self.lines = 0
self.finalise()
def load_p_line(self, *args):
if self.p_initialised:
self.throw_error("May not redefine amount of issues and "
"judges in the middle of a cnf file")
if args[0] != "cnf":
self.throw_error("The given file is not a scenario file")
if len(args) != 3:
self.throw_error("Expected two arguments after 'p cnf'")
try:
var_amount = int(args[1])
clause_amount = int(args[2])
except ValueError:
self.throw_error("'%s' and/or '%s' not a number" %
(args[1], args[2]))
self.initialise_data(var_amount, clause_amount)
def load_clause_line(self, *args):
try:
clause = list(map(int, args))
except ValueError:
self.throw_error("clause line should consist of only integers")
if clause[-1] != 0:
self.throw_error("clause line should end with a 0")
self.add_clause(clause[:-1])
def add_clause(self, clause):
"""Add a clause to the current CNF constraint"""
self.check_initialised()
if (not all([type(c) == int for c in clause])):
self.throw_error("Clause must be consist of only integers")
if (self.clauses_loaded >= self.clause_amount):
self.throw_error("May not add more clauses than specified while "
"initialising the object")
if (not all([c != 0 and abs(c) <= self.var_amount for c in clause])):
self.throw_error("Variables referenced should be between 1 and "
"the amount given during initialisation (%s)" %
self.var_amount)
self.clauses[self.clauses_loaded] = clause
# sp_clause = sp.false
# for c in clause:
# atom = self.boolean_vars[abs(c) - 1]
# if c < 0:
# atom = ~atom
# sp_clause = sp_clause | atom
# self.boolean_expression = self.boolean_expression & sp_clause
# self.boolean_lambda = None
self.clauses_loaded += 1
def check_clause(self, clause, judgment):
"""Return True if any of the elements of the clause are True."""
for index in clause:
if (index < 0) ^ judgment[abs(index) - 1]:
return True
return False
def check_judgment(self, judgment):
"""Return True if the judgment satisfies the current costraint"""
for clause in self.clauses:
if not self.check_clause(clause, judgment):
return False
return True
def generate_all_valid_judgments(self):
"""Generate all valid judgments according to the current constraint"""
self.check_initialised()
for judgment in ps.itersolve(self.clauses, vars=self.var_amount):
yield list(map(lambda x: int(x > 0), judgment))
def get_var_amount(self):
"""Get the var amount of the current constraint"""
return self.var_amount
```
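A small usage sketch of the pycosat call that `generate_all_valid_judgments` builds on; the two-variable formula below is invented for the example.
```python
import pycosat as ps

# (x1 OR x2) AND (NOT x1 OR x2): every model must set x2 to true.
clauses = [[1, 2], [-1, 2]]
for model in ps.itersolve(clauses, vars=2):
    print([int(lit > 0) for lit in model])  # prints [1, 1] and [0, 1] in some order
```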
#### File: judgment_aggregation/test/test_scenario.py
```python
from unittest import TestCase
from ..data.Scenario import Scenario
class TestScenario(TestCase):
def test_creating_scenario(self):
s = Scenario()
s.load_file("kemenyslaterdiff.ja")
s.solve('ASP')
self.assertEqual(s.collective_judgments, [[0, 1, 1, 0, 0, 0, 1, 1]])
```
|
{
"source": "JeroenSwart/warmstart",
"score": 2
}
|
#### File: src/utils/thesis_utils.py
```python
import pandas as pd
from src.pipeline_optimization.bayesian_hopt import Config
from plotly.subplots import make_subplots
import plotly.graph_objects as go
def thesis_lookup_objective(name):
def objective(configs):
# import lookup table
lookup_table = pd.read_csv(
"../../data/metadata/raw/" + name + ".csv", index_col=0, header=[0, 1]
)
lookup_table.loc[:, ("hyperparameters", "learning_rate")] = lookup_table[
"hyperparameters"
]["learning_rate"].round(13)
idx = lookup_table.index[
(lookup_table["hyperparameters"]["max_depth"] == configs["max_depth"])
& (
lookup_table["hyperparameters"]["learning_rate"]
== configs["learning_rate"]
)
& (
lookup_table["hyperparameters"]["min_child_weight"]
== configs["min_child_weight"]
)
& (lookup_table["hyperparameters"]["subsample"] == configs["subsample"])
& (lookup_table["hyperparameters"]["num_trees"] == configs["num_trees"])
]
result = lookup_table.iloc[idx]["diagnostics"]["mae"].squeeze()
walltime = lookup_table.iloc[idx]["diagnostics"]["walltime"].squeeze()
crossval = lookup_table.iloc[idx]["crossval_diag"]["mae"].squeeze()
return result, walltime, crossval
return objective
def thesis_search_space():
search_space = {
"num_trees": Config(100, 800, granularity=6, rounding=1),
"learning_rate": Config(-2.5, -0.5, granularity=10, scale="log", rounding=13),
"max_depth": Config(5, 20, granularity=8, rounding=0),
"min_child_weight": Config(5, 40, granularity=3, rounding=1),
"subsample": Config(0.5, 1.0, granularity=3, rounding=2),
}
return search_space
def get_standard_dataset(dataset_name):
# load data
df = pd.read_csv("../../data/timeseries/raw/final_data.csv", index_col=0)
# select the dataset
split_name = dataset_name.split("_")
end_name = split_name[0] + "_target_" + split_name[1]
ex_name = split_name[0] + "_temp_" + split_name[1]
time_based_features = ["Hour of Day", "Day of Week", "Day of Year", "Holiday"]
data = df[[end_name, ex_name] + time_based_features].rename(
columns={end_name: "endogenous", ex_name: "exogenous"}
)
dataset = data.dropna(subset=["endogenous"])
# divide in training and test data
start = dataset.index.get_loc("2012-01-01 00:00:00+00:00")
train_nr = int(split_name[2])
dataset = dataset[start : start + train_nr]
dataset.index = pd.DatetimeIndex(dataset.index)
test_data = data.dropna(subset=["endogenous"])[
start + train_nr : start + train_nr + 365 * 24
]
test_data.index = pd.DatetimeIndex(test_data.index)
return dataset, test_data
def visualize_avg_performance_single_datasets(hopt_exp, sample_ids):
fig = make_subplots(rows=1, cols=len(sample_ids))
for i, sample_id in enumerate(sample_ids):
# transform to best so far dataframe
data = hopt_exp.best_so_far[sample_id].mean(level="iterations")
for identifier in [hopt.identifier for hopt in hopt_exp._hopts]:
fig.add_trace(
go.Scatter(y=data[identifier], name=identifier), row=1, col=i + 1
)
fig.update_layout(
title=sample_id,
xaxis=go.layout.XAxis(title="Iterations"),
yaxis=go.layout.YAxis(title="MAE"),
)
fig.update_layout(height=600, width=1500, title_text="Subplots")
fig.show()
```
|
{
"source": "jeroenterheerdt/nexia",
"score": 3
}
|
#### File: nexia/tests/test_home.py
```python
import json
import os
from os.path import dirname
import unittest
import pytest
from nexia.home import NexiaHome
def load_fixture(filename):
"""Load a fixture."""
test_dir = dirname(__file__)
path = os.path.join(test_dir, "fixtures", filename)
with open(path) as fptr:
return fptr.read()
class TestNexiaThermostat(unittest.TestCase):
"""Tests for nexia thermostat."""
def test_update(self):
nexia = NexiaHome(auto_login=False)
devices_json = json.loads(load_fixture("mobile_houses_123456.json"))
nexia.update_from_json(devices_json)
thermostat = nexia.get_thermostat_by_id(2059661)
zone_ids = thermostat.get_zone_ids()
self.assertEqual(zone_ids, [83261002, 83261005, 83261008, 83261011])
nexia.update_from_json(devices_json)
zone_ids = thermostat.get_zone_ids()
self.assertEqual(zone_ids, [83261002, 83261005, 83261008, 83261011])
nexia.update_from_json(devices_json)
def test_idle_thermo(self):
"""Get methods for an idle thermostat."""
nexia = NexiaHome(auto_login=False)
devices_json = json.loads(load_fixture("mobile_houses_123456.json"))
nexia.update_from_json(devices_json)
thermostat = nexia.get_thermostat_by_id(2059661)
self.assertEqual(thermostat.get_model(), "XL1050")
self.assertEqual(thermostat.get_firmware(), "5.9.1")
self.assertEqual(thermostat.get_dev_build_number(), "1581321824")
self.assertEqual(thermostat.get_device_id(), "000000")
self.assertEqual(thermostat.get_type(), "XL1050")
self.assertEqual(thermostat.get_name(), "Downstairs East Wing")
self.assertEqual(thermostat.get_deadband(), 3)
self.assertEqual(thermostat.get_setpoint_limits(), (55, 99))
self.assertEqual(thermostat.get_variable_fan_speed_limits(), (0.35, 1.0))
self.assertEqual(thermostat.get_unit(), "F")
self.assertEqual(thermostat.get_humidity_setpoint_limits(), (0.35, 0.65))
self.assertEqual(thermostat.get_fan_mode(), "Auto")
self.assertEqual(thermostat.get_fan_modes(), ["Auto", "On", "Circulate"])
self.assertEqual(thermostat.get_outdoor_temperature(), 88.0)
self.assertEqual(thermostat.get_relative_humidity(), 0.36)
self.assertEqual(thermostat.get_current_compressor_speed(), 0.0)
self.assertEqual(thermostat.get_requested_compressor_speed(), 0.0)
self.assertEqual(thermostat.get_fan_speed_setpoint(), 0.35)
self.assertEqual(thermostat.get_dehumidify_setpoint(), 0.50)
self.assertEqual(thermostat.has_dehumidify_support(), True)
self.assertEqual(thermostat.has_dehumidify_support(), True)
self.assertEqual(thermostat.has_emergency_heat(), False)
self.assertEqual(thermostat.get_system_status(), "System Idle")
self.assertEqual(thermostat.has_air_cleaner(), True)
self.assertEqual(thermostat.get_air_cleaner_mode(), "auto")
self.assertEqual(thermostat.is_blower_active(), False)
zone_ids = thermostat.get_zone_ids()
self.assertEqual(zone_ids, [83261002, 83261005, 83261008, 83261011])
def test_idle_thermo_issue_33758(self):
"""Get methods for an idle thermostat."""
nexia = NexiaHome(auto_login=False)
devices_json = json.loads(load_fixture("mobile_house_issue_33758.json"))
nexia.update_from_json(devices_json)
thermostat = nexia.get_thermostat_by_id(12345678)
self.assertEqual(thermostat.get_model(), "XL1050")
self.assertEqual(thermostat.get_firmware(), "5.9.1")
self.assertEqual(thermostat.get_dev_build_number(), "1581321824")
self.assertEqual(thermostat.get_device_id(), "xxxxxx")
self.assertEqual(thermostat.get_type(), "XL1050")
self.assertEqual(thermostat.get_name(), "Thermostat")
self.assertEqual(thermostat.get_deadband(), 3)
self.assertEqual(thermostat.get_setpoint_limits(), (55, 99))
self.assertEqual(thermostat.get_variable_fan_speed_limits(), (0.35, 1.0))
self.assertEqual(thermostat.get_unit(), "F")
self.assertEqual(thermostat.get_humidity_setpoint_limits(), (0.35, 0.65))
self.assertEqual(thermostat.get_fan_mode(), "Auto")
self.assertEqual(thermostat.get_fan_modes(), ["Auto", "On", "Circulate"])
self.assertEqual(thermostat.get_outdoor_temperature(), 55.0)
self.assertEqual(thermostat.get_relative_humidity(), 0.43)
self.assertEqual(thermostat.get_current_compressor_speed(), 0.0)
self.assertEqual(thermostat.get_requested_compressor_speed(), 0.0)
self.assertEqual(thermostat.get_fan_speed_setpoint(), 1)
self.assertEqual(thermostat.get_dehumidify_setpoint(), 0.55)
self.assertEqual(thermostat.has_dehumidify_support(), True)
self.assertEqual(thermostat.has_humidify_support(), True)
self.assertEqual(thermostat.has_emergency_heat(), True)
self.assertEqual(thermostat.is_emergency_heat_active(), False)
self.assertEqual(thermostat.get_system_status(), "System Idle")
self.assertEqual(thermostat.has_air_cleaner(), True)
self.assertEqual(thermostat.get_air_cleaner_mode(), "auto")
self.assertEqual(thermostat.is_blower_active(), False)
zone_ids = thermostat.get_zone_ids()
self.assertEqual(zone_ids, [12345678])
def test_idle_thermo_issue_33968_thermostat_1690380(self):
"""Get methods for an cooling thermostat."""
nexia = NexiaHome(auto_login=False)
devices_json = json.loads(load_fixture("mobile_house_issue_33968.json"))
nexia.update_from_json(devices_json)
thermostat_ids = nexia.get_thermostat_ids()
self.assertEqual(thermostat_ids, [1690380])
thermostat = nexia.get_thermostat_by_id(1690380)
zone_ids = thermostat.get_zone_ids()
self.assertEqual(zone_ids, [83037337, 83037340, 83037343])
self.assertEqual(thermostat.get_model(), "XL1050")
self.assertEqual(thermostat.get_firmware(), "5.9.1")
self.assertEqual(thermostat.get_dev_build_number(), "1581321824")
self.assertEqual(thermostat.get_device_id(), "removed")
self.assertEqual(thermostat.get_type(), "XL1050")
self.assertEqual(thermostat.get_name(), "Thermostat")
self.assertEqual(thermostat.get_deadband(), 3)
self.assertEqual(thermostat.get_setpoint_limits(), (55, 99))
self.assertEqual(thermostat.get_variable_fan_speed_limits(), (0.35, 1.0))
self.assertEqual(thermostat.get_unit(), "F")
self.assertEqual(thermostat.get_humidity_setpoint_limits(), (0.35, 0.65))
self.assertEqual(thermostat.get_fan_mode(), "Auto")
self.assertEqual(thermostat.get_fan_modes(), ["Auto", "On", "Circulate"])
self.assertEqual(thermostat.get_outdoor_temperature(), 80.0)
self.assertEqual(thermostat.get_relative_humidity(), 0.55)
self.assertEqual(thermostat.get_current_compressor_speed(), 0.41)
self.assertEqual(thermostat.get_requested_compressor_speed(), 0.41)
self.assertEqual(thermostat.get_fan_speed_setpoint(), 0.5)
self.assertEqual(thermostat.get_dehumidify_setpoint(), 0.55)
self.assertEqual(thermostat.has_dehumidify_support(), True)
self.assertEqual(thermostat.has_humidify_support(), False)
self.assertEqual(thermostat.has_emergency_heat(), True)
self.assertEqual(thermostat.is_emergency_heat_active(), False)
self.assertEqual(thermostat.get_system_status(), "Cooling")
self.assertEqual(thermostat.has_air_cleaner(), True)
self.assertEqual(thermostat.get_air_cleaner_mode(), "auto")
self.assertEqual(thermostat.is_blower_active(), True)
def test_active_thermo(self):
"""Get methods for an active thermostat."""
nexia = NexiaHome(auto_login=False)
devices_json = json.loads(load_fixture("mobile_houses_123456.json"))
nexia.update_from_json(devices_json)
thermostat = nexia.get_thermostat_by_id(2293892)
self.assertEqual(thermostat.get_model(), "XL1050")
self.assertEqual(thermostat.get_firmware(), "5.9.1")
self.assertEqual(thermostat.get_dev_build_number(), "1581321824")
self.assertEqual(thermostat.get_device_id(), "0281B02C")
self.assertEqual(thermostat.get_type(), "XL1050")
self.assertEqual(thermostat.get_name(), "Master Suite")
self.assertEqual(thermostat.get_deadband(), 3)
self.assertEqual(thermostat.get_setpoint_limits(), (55, 99))
self.assertEqual(thermostat.get_variable_fan_speed_limits(), (0.35, 1.0))
self.assertEqual(thermostat.get_unit(), "F")
self.assertEqual(thermostat.get_humidity_setpoint_limits(), (0.35, 0.65))
self.assertEqual(thermostat.get_fan_mode(), "Auto")
self.assertEqual(thermostat.get_fan_modes(), ["Auto", "On", "Circulate"])
self.assertEqual(thermostat.get_outdoor_temperature(), 87.0)
self.assertEqual(thermostat.get_relative_humidity(), 0.52)
self.assertEqual(thermostat.get_current_compressor_speed(), 0.69)
self.assertEqual(thermostat.get_requested_compressor_speed(), 0.69)
self.assertEqual(thermostat.get_fan_speed_setpoint(), 0.35)
self.assertEqual(thermostat.get_dehumidify_setpoint(), 0.45)
self.assertEqual(thermostat.has_dehumidify_support(), True)
self.assertEqual(thermostat.has_humidify_support(), False)
self.assertEqual(thermostat.has_emergency_heat(), False)
self.assertEqual(thermostat.get_system_status(), "Cooling")
self.assertEqual(thermostat.has_air_cleaner(), True)
self.assertEqual(thermostat.get_air_cleaner_mode(), "auto")
self.assertEqual(thermostat.is_blower_active(), True)
zone_ids = thermostat.get_zone_ids()
self.assertEqual(zone_ids, [83394133, 83394130, 83394136, 83394127, 83394139])
@pytest.mark.skip(reason="not yet supported")
def test_xl624(self):
"""Get methods for an xl624 thermostat."""
nexia = NexiaHome(auto_login=False)
devices_json = json.loads(load_fixture("mobile_house_xl624.json"))
nexia.update_from_json(devices_json)
thermostat_ids = nexia.get_thermostat_ids()
self.assertEqual(thermostat_ids, [2222222, 3333333])
thermostat = nexia.get_thermostat_by_id(1111111)
self.assertEqual(thermostat.get_model(), None)
self.assertEqual(thermostat.get_firmware(), "2.8")
self.assertEqual(thermostat.get_dev_build_number(), "0603340208")
self.assertEqual(thermostat.get_device_id(), None)
self.assertEqual(thermostat.get_type(), None)
self.assertEqual(thermostat.get_name(), "Downstairs Hall")
self.assertEqual(thermostat.get_deadband(), 3)
self.assertEqual(thermostat.get_setpoint_limits(), (55, 99))
self.assertEqual(thermostat.has_variable_fan_speed(), False)
self.assertEqual(thermostat.get_unit(), "F")
self.assertEqual(thermostat.get_humidity_setpoint_limits(), (0.35, 0.65))
self.assertEqual(thermostat.get_fan_mode(), "Auto")
self.assertEqual(thermostat.get_fan_modes(), ["Auto", "On", "Cycler"])
self.assertEqual(thermostat.get_current_compressor_speed(), 0.0)
self.assertEqual(thermostat.get_requested_compressor_speed(), 0.0)
self.assertEqual(thermostat.has_dehumidify_support(), False)
self.assertEqual(thermostat.has_humidify_support(), False)
self.assertEqual(thermostat.has_emergency_heat(), False)
self.assertEqual(thermostat.get_system_status(), "System Idle")
self.assertEqual(thermostat.has_air_cleaner(), False)
self.assertEqual(thermostat.is_blower_active(), False)
zone_ids = thermostat.get_zone_ids()
self.assertEqual(zone_ids, [12345678])
def test_xl824_1(self):
"""Get methods for an xl824 thermostat."""
nexia = NexiaHome(auto_login=False)
devices_json = json.loads(load_fixture("mobile_house_xl624.json"))
nexia.update_from_json(devices_json)
thermostat_ids = nexia.get_thermostat_ids()
self.assertEqual(thermostat_ids, [2222222, 3333333])
thermostat = nexia.get_thermostat_by_id(2222222)
self.assertEqual(thermostat.get_model(), "XL824")
self.assertEqual(thermostat.get_firmware(), "5.9.1")
self.assertEqual(thermostat.get_dev_build_number(), "1581314625")
self.assertEqual(thermostat.get_device_id(), "0167CA48")
self.assertEqual(thermostat.get_type(), "XL824")
self.assertEqual(thermostat.get_name(), "Family Room")
self.assertEqual(thermostat.get_deadband(), 3)
self.assertEqual(thermostat.get_setpoint_limits(), (55, 99))
self.assertEqual(thermostat.has_variable_fan_speed(), True)
self.assertEqual(thermostat.get_unit(), "F")
self.assertEqual(thermostat.get_humidity_setpoint_limits(), (0.35, 0.65))
self.assertEqual(thermostat.get_fan_mode(), "Circulate")
self.assertEqual(thermostat.get_fan_modes(), ["Auto", "On", "Circulate"])
self.assertEqual(thermostat.get_current_compressor_speed(), 0.0)
self.assertEqual(thermostat.get_requested_compressor_speed(), 0.0)
self.assertEqual(thermostat.has_dehumidify_support(), True)
self.assertEqual(thermostat.has_humidify_support(), False)
self.assertEqual(thermostat.has_emergency_heat(), False)
self.assertEqual(thermostat.get_system_status(), "System Idle")
self.assertEqual(thermostat.has_air_cleaner(), True)
self.assertEqual(thermostat.is_blower_active(), False)
zone_ids = thermostat.get_zone_ids()
self.assertEqual(zone_ids, [88888888])
def test_xl824_2(self):
"""Get methods for an xl824 thermostat."""
nexia = NexiaHome(auto_login=False)
devices_json = json.loads(load_fixture("mobile_house_xl624.json"))
nexia.update_from_json(devices_json)
thermostat_ids = nexia.get_thermostat_ids()
self.assertEqual(thermostat_ids, [2222222, 3333333])
thermostat = nexia.get_thermostat_by_id(3333333)
self.assertEqual(thermostat.get_model(), "XL824")
self.assertEqual(thermostat.get_firmware(), "5.9.1")
self.assertEqual(thermostat.get_dev_build_number(), "1581314625")
self.assertEqual(thermostat.get_device_id(), "01573380")
self.assertEqual(thermostat.get_type(), "XL824")
self.assertEqual(thermostat.get_name(), "Upstairs")
self.assertEqual(thermostat.get_deadband(), 3)
self.assertEqual(thermostat.get_setpoint_limits(), (55, 99))
self.assertEqual(thermostat.has_variable_fan_speed(), True)
self.assertEqual(thermostat.get_unit(), "F")
self.assertEqual(thermostat.get_humidity_setpoint_limits(), (0.35, 0.65))
self.assertEqual(thermostat.get_fan_mode(), "Circulate")
self.assertEqual(thermostat.get_fan_modes(), ["Auto", "On", "Circulate"])
self.assertEqual(thermostat.get_current_compressor_speed(), 0.0)
self.assertEqual(thermostat.get_requested_compressor_speed(), 0.0)
self.assertEqual(thermostat.has_dehumidify_support(), True)
self.assertEqual(thermostat.has_humidify_support(), False)
self.assertEqual(thermostat.has_emergency_heat(), False)
self.assertEqual(thermostat.get_system_status(), "System Idle")
self.assertEqual(thermostat.has_air_cleaner(), True)
self.assertEqual(thermostat.is_blower_active(), False)
zone_ids = thermostat.get_zone_ids()
self.assertEqual(zone_ids, [99999999])
class TestNexiaHome(unittest.TestCase):
"""Tests for nexia home."""
def test_basic(self):
"""Basic tests for NexiaHome."""
nexia = NexiaHome(auto_login=False)
devices_json = json.loads(load_fixture("mobile_houses_123456.json"))
nexia.update_from_json(devices_json)
self.assertEqual(nexia.get_name(), "Hidden")
thermostat_ids = nexia.get_thermostat_ids()
self.assertEqual(thermostat_ids, [2059661, 2059676, 2293892, 2059652])
def test_basic_issue_33758(self):
"""Basic tests for NexiaHome."""
nexia = NexiaHome(auto_login=False)
devices_json = json.loads(load_fixture("mobile_house_issue_33758.json"))
nexia.update_from_json(devices_json)
self.assertEqual(nexia.get_name(), "Hidden")
thermostat_ids = nexia.get_thermostat_ids()
self.assertEqual(thermostat_ids, [12345678])
class TestNexiaThermostatZone(unittest.TestCase):
"""Tests for nexia thermostat zone."""
def test_zone_issue_33968_zone_83037337(self):
"""Tests for nexia thermostat zone that is cooling."""
nexia = NexiaHome(auto_login=False)
devices_json = json.loads(load_fixture("mobile_house_issue_33968.json"))
nexia.update_from_json(devices_json)
thermostat = nexia.get_thermostat_by_id(1690380)
zone = thermostat.get_zone_by_id(83037337)
self.assertEqual(zone.thermostat, thermostat)
self.assertEqual(zone.get_name(), "Family Room")
self.assertEqual(zone.get_cooling_setpoint(), 77)
self.assertEqual(zone.get_heating_setpoint(), 74)
self.assertEqual(zone.get_current_mode(), "COOL")
self.assertEqual(
zone.get_requested_mode(), "COOL",
)
self.assertEqual(
zone.get_presets(), ["None", "Home", "Away", "Sleep"],
)
self.assertEqual(
zone.get_preset(), "None",
)
self.assertEqual(
zone.get_status(), "Damper Closed",
)
self.assertEqual(
zone.get_setpoint_status(), "Permanent Hold",
)
self.assertEqual(zone.is_calling(), False)
self.assertEqual(zone.is_in_permanent_hold(), True)
def test_zone_issue_33968_zone_83037340(self):
"""Tests for nexia thermostat zone that is cooling."""
nexia = NexiaHome(auto_login=False)
devices_json = json.loads(load_fixture("mobile_house_issue_33968.json"))
nexia.update_from_json(devices_json)
thermostat = nexia.get_thermostat_by_id(1690380)
zone = thermostat.get_zone_by_id(83037340)
self.assertEqual(zone.thermostat, thermostat)
self.assertEqual(zone.get_name(), "Office")
self.assertEqual(zone.get_cooling_setpoint(), 77)
self.assertEqual(zone.get_heating_setpoint(), 74)
self.assertEqual(zone.get_current_mode(), "COOL")
self.assertEqual(
zone.get_requested_mode(), "COOL",
)
self.assertEqual(
zone.get_presets(), ["None", "Home", "Away", "Sleep"],
)
self.assertEqual(
zone.get_preset(), "None",
)
self.assertEqual(
zone.get_status(), "Damper Open",
)
self.assertEqual(
zone.get_setpoint_status(), "Permanent Hold",
)
self.assertEqual(zone.is_calling(), True)
self.assertEqual(zone.is_in_permanent_hold(), True)
def test_zone_issue_33968_zone_83037343(self):
"""Tests for nexia thermostat zone that is cooling."""
nexia = NexiaHome(auto_login=False)
devices_json = json.loads(load_fixture("mobile_house_issue_33968.json"))
nexia.update_from_json(devices_json)
thermostat = nexia.get_thermostat_by_id(1690380)
zone = thermostat.get_zone_by_id(83037343)
self.assertEqual(zone.thermostat, thermostat)
self.assertEqual(zone.get_name(), "Master")
self.assertEqual(zone.get_cooling_setpoint(), 77)
self.assertEqual(zone.get_heating_setpoint(), 68)
self.assertEqual(zone.get_current_mode(), "COOL")
self.assertEqual(
zone.get_requested_mode(), "COOL",
)
self.assertEqual(
zone.get_presets(), ["None", "Home", "Away", "Sleep"],
)
self.assertEqual(
zone.get_preset(), "None",
)
self.assertEqual(
zone.get_status(), "Damper Open",
)
self.assertEqual(
zone.get_setpoint_status(), "Permanent Hold",
)
self.assertEqual(zone.is_calling(), True)
self.assertEqual(zone.is_in_permanent_hold(), True)
def test_zone_issue_33758(self):
"""Tests for nexia thermostat zone relieving air."""
nexia = NexiaHome(auto_login=False)
devices_json = json.loads(load_fixture("mobile_house_issue_33758.json"))
nexia.update_from_json(devices_json)
thermostat = nexia.get_thermostat_by_id(12345678)
zone = thermostat.get_zone_by_id(12345678)
self.assertEqual(zone.thermostat, thermostat)
self.assertEqual(zone.get_name(), "Thermostat NativeZone")
self.assertEqual(zone.get_cooling_setpoint(), 73)
self.assertEqual(zone.get_heating_setpoint(), 68)
self.assertEqual(zone.get_current_mode(), "AUTO")
self.assertEqual(
zone.get_requested_mode(), "AUTO",
)
self.assertEqual(
zone.get_presets(), ["None", "Home", "Away", "Sleep"],
)
self.assertEqual(
zone.get_preset(), "None",
)
self.assertEqual(
zone.get_status(), "Idle",
)
self.assertEqual(
zone.get_setpoint_status(), "Run Schedule - None",
)
self.assertEqual(zone.is_calling(), False)
self.assertEqual(zone.is_in_permanent_hold(), False)
def test_zone_relieving_air(self):
"""Tests for nexia thermostat zone relieving air."""
nexia = NexiaHome(auto_login=False)
devices_json = json.loads(load_fixture("mobile_houses_123456.json"))
nexia.update_from_json(devices_json)
thermostat = nexia.get_thermostat_by_id(2293892)
zone = thermostat.get_zone_by_id(83394133)
self.assertEqual(zone.thermostat, thermostat)
self.assertEqual(zone.get_name(), "Bath Closet")
self.assertEqual(zone.get_cooling_setpoint(), 79)
self.assertEqual(zone.get_heating_setpoint(), 63)
self.assertEqual(zone.get_current_mode(), "AUTO")
self.assertEqual(
zone.get_requested_mode(), "AUTO",
)
self.assertEqual(
zone.get_presets(), ["None", "Home", "Away", "Sleep"],
)
self.assertEqual(
zone.get_preset(), "None",
)
self.assertEqual(
zone.get_status(), "Relieving Air",
)
self.assertEqual(
zone.get_setpoint_status(), "Permanent Hold",
)
self.assertEqual(zone.is_calling(), True)
self.assertEqual(zone.is_in_permanent_hold(), True)
def test_zone_cooling_air(self):
"""Tests for nexia thermostat zone cooling."""
nexia = NexiaHome(auto_login=False)
devices_json = json.loads(load_fixture("mobile_houses_123456.json"))
nexia.update_from_json(devices_json)
thermostat = nexia.get_thermostat_by_id(2293892)
zone = thermostat.get_zone_by_id(83394130)
self.assertEqual(zone.get_name(), "Master")
self.assertEqual(zone.get_cooling_setpoint(), 71)
self.assertEqual(zone.get_heating_setpoint(), 63)
self.assertEqual(zone.get_current_mode(), "AUTO")
self.assertEqual(
zone.get_requested_mode(), "AUTO",
)
self.assertEqual(
zone.get_presets(), ["None", "Home", "Away", "Sleep"],
)
self.assertEqual(
zone.get_preset(), "None",
)
self.assertEqual(
zone.get_status(), "Damper Open",
)
self.assertEqual(
zone.get_setpoint_status(), "Permanent Hold",
)
self.assertEqual(zone.is_calling(), True)
self.assertEqual(zone.is_in_permanent_hold(), True)
def test_zone_idle(self):
"""Tests for nexia thermostat zone idle."""
nexia = NexiaHome(auto_login=False)
devices_json = json.loads(load_fixture("mobile_houses_123456.json"))
nexia.update_from_json(devices_json)
thermostat = nexia.get_thermostat_by_id(2059661)
zone = thermostat.get_zone_by_id(83261002)
self.assertEqual(zone.get_name(), "Living East")
self.assertEqual(zone.get_cooling_setpoint(), 79)
self.assertEqual(zone.get_heating_setpoint(), 63)
self.assertEqual(zone.get_current_mode(), "AUTO")
self.assertEqual(
zone.get_requested_mode(), "AUTO",
)
self.assertEqual(
zone.get_presets(), ["None", "Home", "Away", "Sleep"],
)
self.assertEqual(
zone.get_preset(), "None",
)
self.assertEqual(
zone.get_status(), "Idle",
)
self.assertEqual(
zone.get_setpoint_status(), "Permanent Hold",
)
self.assertEqual(zone.is_calling(), False)
self.assertEqual(zone.is_in_permanent_hold(), True)
def test_xl824_idle(self):
"""Tests for nexia xl824 zone idle."""
nexia = NexiaHome(auto_login=False)
devices_json = json.loads(load_fixture("mobile_house_xl624.json"))
nexia.update_from_json(devices_json)
thermostat_ids = nexia.get_thermostat_ids()
self.assertEqual(thermostat_ids, [2222222, 3333333])
thermostat = nexia.get_thermostat_by_id(3333333)
zone = thermostat.get_zone_by_id(99999999)
self.assertEqual(zone.get_name(), "Upstairs NativeZone")
self.assertEqual(zone.get_cooling_setpoint(), 74)
self.assertEqual(zone.get_heating_setpoint(), 62)
self.assertEqual(zone.get_current_mode(), "COOL")
self.assertEqual(
zone.get_requested_mode(), "COOL",
)
self.assertEqual(
zone.get_presets(), ["None", "Home", "Away", "Sleep"],
)
self.assertEqual(
zone.get_preset(), "None",
)
self.assertEqual(
zone.get_status(), "Idle",
)
self.assertEqual(
zone.get_setpoint_status(), "Permanent Hold",
)
self.assertEqual(zone.is_calling(), False)
self.assertEqual(zone.is_in_permanent_hold(), True)
class TestNexiaAutomation(unittest.TestCase):
def test_automations(self):
"""Get methods for an active thermostat."""
nexia = NexiaHome(auto_login=False)
devices_json = json.loads(load_fixture("mobile_houses_123456.json"))
nexia.update_from_json(devices_json)
automation_ids = nexia.get_automation_ids()
self.assertEqual(
automation_ids,
[3467876, 3467870, 3452469, 3452472, 3454776, 3454774, 3486078, 3486091],
)
automation_one = nexia.get_automation_by_id(3467876)
self.assertEqual(automation_one.name, "Away for 12 Hours")
self.assertEqual(
automation_one.description,
"When IFTTT activates the automation Upstairs West Wing will "
"permanently hold the heat to 62.0 and cool to 83.0 AND "
"Downstairs East Wing will permanently hold the heat to 62.0 "
"and cool to 83.0 AND Downstairs West Wing will permanently "
"hold the heat to 62.0 and cool to 83.0 AND Activate the mode "
"named 'Away 12' AND Master Suite will permanently hold the "
"heat to 62.0 and cool to 83.0",
)
self.assertEqual(automation_one.enabled, True)
self.assertEqual(automation_one.automation_id, 3467876)
```
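The assertions above double as documentation for the read side of the Nexia API. A condensed walk-through under the same assumptions as the tests; `NexiaHome` and `load_fixture` are the helpers already imported by this test module, and the fixture name is one used above.

```python
import json

# Sketch only: NexiaHome and load_fixture come from this test module's imports.
nexia = NexiaHome(auto_login=False)
nexia.update_from_json(json.loads(load_fixture("mobile_houses_123456.json")))

for thermostat_id in nexia.get_thermostat_ids():
    thermostat = nexia.get_thermostat_by_id(thermostat_id)
    print(thermostat.get_name(), thermostat.get_system_status())
    for zone_id in thermostat.get_zone_ids():
        zone = thermostat.get_zone_by_id(zone_id)
        print("  ", zone.get_name(), zone.get_current_mode(),
              zone.get_heating_setpoint(), zone.get_cooling_setpoint())
```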
|
{
"source": "jeroenterheerdt/TeespringStorefrontParser",
"score": 3
}
|
#### File: jeroenterheerdt/TeespringStorefrontParser/parser.py
```python
from pyquery import PyQuery
import requests
import json
InputFile = "input/store.html"
def getProductImageUrl(product_card_pq):
product_image_str = str(product_card_pq(".product_card__image"))
a = product_image_str.find('https://')
b = product_image_str.find(');" data-reactid')
product_image_url = product_image_str[a:b]
return product_image_url
def getProductCardTitle(product_card_pq):
return product_card_pq(".product_card__title").text()
def getProductCardProductName(product_card_pq):
return product_card_pq(".product_card__product_name").text()
def getProductCardPrice(product_card_pq):
return product_card_pq(".product_card__meta").text()
def getProductCardProductPageUrl(product_card_pq):
s = str(product_card_pq(".product_card").html())
a = s.find('https://')
b = s.find('" data-reactid')
return s[a:b]
def getProductSizes(product_page_url):
sizes = {}
response = requests.get(product_page_url)
product_page = PyQuery(response.content)
product_size_drop_down = product_page("#select-size-campaign-page")
for o in product_size_drop_down.items('option'):
        option_text = o.text()
        if not option_text == '--':
            o_str = str(o)
if not "disabled" in o_str:
a_find_str = 'data-usd-price-with-tax="'
a = o_str.find(a_find_str)
b = o_str.find('" data-gbp-price="')
price = o_str[a+len(a_find_str):b]
sizes.update({option_text:price})
return sizes
def getProductColors(product_page_url):
colors = []
response = requests.get(product_page_url)
product_page = PyQuery(response.content)
color_list = product_page(".product__color_list")
for c in color_list.items('li'):
col = str(c.html())
col_find_str = "background-color:"
a = col.find(col_find_str)
b = col.find('"/>')
color_hex = col[a+len(col_find_str):b]
colors.append(color_hex.upper())
#deduplicate
return list(set(colors))
class Product:
def __init__(self, title, name, price = None, image_url = None, page_url = None, sizes = None, colors = None):
self.title = title
self.name = name
self.price = price
self.image_url = image_url
self.page_url = page_url
self.sizes = sizes
self.colors = colors
with open(InputFile,encoding="utf8") as f:
html = f.read()
pq = PyQuery(html)
product_cards = pq(".product_card")
print("Found "+str(len(product_cards))+" products!")
products = []
for product_card in product_cards:
product_card_pq = PyQuery(product_card)
#product_image_url = getProductImageUrl(product_card_pq)
product_title = getProductCardTitle(product_card_pq)
product_name = getProductCardProductName(product_card_pq)
#product_price = getProductCardPrice(product_card_pq)
product_page_url = getProductCardProductPageUrl(product_card_pq)
#sizes = getProductSizes(product_page_url)
#colors = getProductColors(product_page_url)
#p = Product(product_title, product_name, product_price, product_image_url, product_page_url, sizes, colors)
p = Product(product_title, product_name, None, None, product_page_url)
products.append(p)
print("Parsed "+str(len(products)))
```
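The script only prints a count of the parsed products, and the `json` import at the top is otherwise unused. A minimal sketch of how the collected `Product` objects could be written out; the helper and the output path are hypothetical additions, not part of the original script.

```python
import json

def products_to_json(products, path="output/store.json"):
    """Serialize the parsed Product objects to a JSON file (hypothetical helper)."""
    data = [
        {
            "title": p.title,
            "name": p.name,
            "price": p.price,
            "image_url": p.image_url,
            "page_url": p.page_url,
            "sizes": p.sizes,
            "colors": p.colors,
        }
        for p in products
    ]
    with open(path, "w", encoding="utf8") as f:
        json.dump(data, f, indent=2)

products_to_json(products)
```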
|
{
"source": "Jeroentetje3/PyBitmessage",
"score": 3
}
|
#### File: src/tests/test_addresses.py
```python
import unittest
from binascii import unhexlify
from pybitmessage import addresses
from .samples import sample_address, sample_ripe
class TestAddresses(unittest.TestCase):
"""Test addresses manipulations"""
def test_decode(self):
"""Decode some well known addresses and check the result"""
self.assertEqual(
addresses.decodeAddress(sample_address),
('success', 2, 1, unhexlify(sample_ripe)))
status, version, stream, ripe1 = addresses.decodeAddress(
'2cWzSnwjJ7yRP3nLEWUV5LisTZyREWSzUK')
self.assertEqual(status, 'success')
self.assertEqual(stream, 1)
self.assertEqual(version, 4)
status, version, stream, ripe2 = addresses.decodeAddress(
'2DBPTgeSawWYZceFD69AbDT5q4iUWtj1ZN')
self.assertEqual(status, 'success')
self.assertEqual(stream, 1)
self.assertEqual(version, 3)
self.assertEqual(ripe1, ripe2)
def test_encode(self):
"""Encode sample ripe and compare the result to sample address"""
self.assertEqual(
sample_address,
addresses.encodeAddress(2, 1, unhexlify(sample_ripe)))
```
|
{
"source": "jeroenubbink/commercetools-python-sdk",
"score": 2
}
|
#### File: commercetools-python-sdk/codegen/raml_types.py
```python
from typing import Dict, List, Optional
import attr
from codegen.utils import snakeit
@attr.s(auto_attribs=True)
class DataType:
name: str
type: Optional[str]
properties: List["Property"] = attr.Factory(lambda: [])
base: "DataType" = attr.ib(repr=False, default=None)
children: List["DataType"] = attr.ib(repr=False, default=attr.Factory(lambda: []))
discriminator_value: Optional[str] = None
discriminator: Optional[str] = None
enum: List[str] = attr.Factory(lambda: [])
package_name: Optional[str] = None
#: Annotations are additional free fields, not specified in the
#: raml specs
annotations: Dict[str, object] = attr.Factory(lambda: {})
@property
def is_scalar_type(self):
return self.type in [
"string",
"number",
"float",
"integer",
"boolean",
"date",
"file",
"any",
]
def get_bases(self):
bases = []
cur = self
while cur:
bases.append(cur)
cur = cur.base
return bases
def get_all_properties(self) -> List["Property"]:
"""Return all the properties for this datatype including parent types.
        Note that we need to remove duplicate properties in case a sub resource
overrides a property of the parent.
"""
properties = {} # assume ordered dict
bases = reversed(self.get_bases()) # bottom to top
for base in bases:
for prop in base.properties:
properties[prop.name] = prop
return list(properties.values())
def get_discriminator_field(self):
field = None
bases = self.get_bases()
for base in bases:
if base.discriminator:
field = base.discriminator
break
if field:
all_properties = self.get_all_properties()
all_properties = {prop.name: prop for prop in all_properties}
return all_properties[field]
def get_all_children(self):
children = list(self.children)
for child in self.children:
children.extend(child.get_all_children())
return children
@attr.s(auto_attribs=True, slots=True)
class Property:
name: str
types: List[DataType]
optional: bool = False
many: bool = False
items: List[str] = attr.Factory(lambda: [])
items_types: Optional[List["DataType"]] = None
@property
def type(self):
if self.types:
return self.types[0]
@type.setter
def type(self, value):
if self.types:
self.types[0] = value
else:
self.types = [value]
@property
def attribute_name(self) -> Optional[str]:
name = snakeit(self.name)
if not name or not name.isidentifier():
return None
return name
@attr.s(auto_attribs=True)
class UnresolvedType:
name: str
```
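`get_all_properties` walks the inheritance chain from the topmost base down, so a property redefined on a child replaces the parent's entry while keeping its original position. A small sketch with constructed types; the `codegen.raml_types` import path is assumed from the file location.

```python
from codegen.raml_types import DataType, Property

base = DataType(
    name="Resource",
    type="object",
    properties=[Property(name="id", types=[]), Property(name="version", types=[])],
)
child = DataType(
    name="Category",
    type="Resource",
    properties=[Property(name="id", types=[], optional=True)],
    base=base,
)

props = child.get_all_properties()
# The child's optional "id" overrides the parent's, and only one entry survives.
assert [p.name for p in props] == ["id", "version"]
assert props[0].optional is True
```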
#### File: commercetools-python-sdk/codegen/service_processor.py
```python
import typing
import attr
from codegen.utils import (
class_name,
create_codename,
create_method_name,
extract_name,
snakeit,
)
@attr.s(auto_attribs=True)
class ServiceParameter:
name: str
type: str
required: bool
multiple: bool = False
extra_data: dict = None
@property
def pytype(self):
if self.type == "string":
return str
return self.type
@attr.s(auto_attribs=True)
class ServiceMethod:
name: str
path: str
method: str
type: str
context_name: str = None
description: str = None
path_params: typing.List[ServiceParameter] = attr.Factory(list)
query_params: typing.List[ServiceParameter] = attr.Factory(list)
extra_params: typing.List[ServiceParameter] = attr.Factory(list)
input_type: str = None
returns: str = None
is_fileupload: bool = False
traits: typing.List["TraitInfo"] = attr.Factory(list)
@attr.s(auto_attribs=True)
class ServiceDomain:
path: str
parent: "ServiceDomain" = None
context_name: str = None
description: str = None
path_parameters: typing.List[ServiceParameter] = None
methods: typing.List[ServiceMethod] = None
resource_draft: str = None
resource_type: str = None
resource_querytype: str = None
def add_method(self, *methods):
for method in methods:
method.context_name = self.context_name
self.methods.append(method)
@attr.s(auto_attribs=True)
class TraitInfo:
name: str
class_name: str
params: typing.List[ServiceParameter] = attr.Factory(list)
class ServiceProcessor:
def __init__(self):
pass
def load(self, source):
self._source = source["/{projectKey}"]
self.traits = self._parse_traits(source["traits"])
self._resource_types = source["resourceTypes"]
def __iter__(self):
for path, data in self._source.items():
if path.startswith("/"):
yield self._parse_service(path, data)
def _parse_traits(self, source) -> typing.Dict[str, TraitInfo]:
result = {}
for name, data in source.items():
param_data = data.get("queryParameters", {})
params = _parse_query_parameters(param_data)
for param in params:
param.multiple = True
result[name] = TraitInfo(
name=name, class_name=class_name(name), params=params
)
return result
def _parse_service(self, path, source, parent=None):
domain = ServiceDomain(
path=path if not parent else parent.path + path,
parent=parent,
methods=[],
description=source.get("description"),
path_parameters=self._get_parameters(source),
resource_draft=extract_name(
_get_value(source, "type", "baseDomain", "resourceDraft")
),
resource_type=extract_name(
_get_value(source, "type", "baseDomain", "resourceType")
),
resource_querytype=extract_name(
_get_value(source, "type", "baseDomain", "resourceQueryType")
),
)
if parent and parent.path_parameters:
domain.path_parameters = parent.path_parameters + domain.path_parameters
domain.context_name = domain.resource_type or create_codename(path).title()
for endpoint, endpoint_data in source.items():
endpoint_type = _get_item_type(endpoint_data)
if endpoint_type == "baseDomain":
child = self._parse_service(endpoint, endpoint_data, domain)
domain.add_method(*child.methods)
del child
elif endpoint in ["post", "get", "delete"]:
method = self._get_domain_methods(
domain, endpoint, endpoint_data, source
)
if method:
domain.add_method(method)
elif endpoint.startswith("/"):
subparams = self._get_parameters(endpoint_data)
if not subparams:
method = self._get_action_method(
domain, endpoint, endpoint_data, source
)
if method:
domain.add_method(method)
else:
for method in self._get_resource_methods(
domain, endpoint, endpoint_data
):
domain.add_method(method)
return domain
def _get_domain_methods(self, service_domain, method, method_data, parent_data):
method_name = ""
if service_domain.parent:
method_name = snakeit(service_domain.context_name) + "_"
if method == "get":
method = ServiceMethod(
name=method_name + "query",
path=service_domain.path,
path_params=list(service_domain.path_parameters),
query_params=[],
type="query",
method=method,
returns=_get_return_type(
method_data, service_domain.resource_querytype
),
)
return self._add_metadata(method, method_data, parent_data)
elif method == "post":
method = ServiceMethod(
name=method_name + "create",
path=service_domain.path,
path_params=list(service_domain.path_parameters),
query_params=[],
type="create",
method=method,
input_type=service_domain.resource_draft,
returns=_get_return_type(method_data, service_domain.resource_type),
)
return self._add_metadata(method, method_data, parent_data)
def _get_action_method(self, service_domain, path, data, parent_data):
if "get" in data and "post" in data:
if data["post"].get("responses"):
endpoint_data = data["post"]
method = "post"
else:
endpoint_data = data["get"]
method = "get"
elif "post" in data:
endpoint_data = data["post"]
method = "post"
elif "get" in data:
endpoint_data = data["get"]
method = "get"
else:
return None
method_name = create_method_name(path)
if service_domain.parent:
method_name = snakeit(service_domain.context_name) + "_" + method_name
method = ServiceMethod(
name=method_name,
path=service_domain.path + path,
path_params=[],
query_params=[],
type="action",
method=method,
input_type=_get_input_type(endpoint_data),
returns=_get_return_type(endpoint_data, service_domain.resource_type),
)
return self._add_metadata(method, endpoint_data, data)
def _get_resource_methods(self, service_domain, path, data):
params = list(service_domain.path_parameters)
params.extend(self._get_parameters(data))
method_name = "_%s" % snakeit(data["(methodName)"])
method_name = method_name.replace("with", "by")
type_name = _get_item_type(data)
name_prefix = ""
if service_domain.parent:
name_prefix = snakeit(service_domain.context_name) + "_"
for endpoint_path, endpoint_data in data.items():
if endpoint_path == "post":
input_type = data["type"][type_name]["resourceUpdateType"]
method = ServiceMethod(
name=name_prefix + "update" + method_name,
path=service_domain.path + path,
path_params=list(params),
type="update",
method="post",
input_type=service_domain.resource_type + input_type,
returns=_get_return_type(
endpoint_data, service_domain.resource_type
),
)
yield self._add_metadata(method, endpoint_data, data)
elif endpoint_path == "get":
method = ServiceMethod(
name=name_prefix + "get" + method_name,
path=service_domain.path + path,
path_params=list(params),
type="get",
method="get",
returns=_get_return_type(
endpoint_data, service_domain.resource_type
),
)
yield self._add_metadata(method, endpoint_data, data)
elif endpoint_path == "delete":
method = ServiceMethod(
name=name_prefix + "delete" + method_name,
path=service_domain.path + path,
path_params=list(params),
type="delete",
method="delete",
returns=_get_return_type(
endpoint_data, service_domain.resource_type
),
)
yield self._add_metadata(method, endpoint_data, data)
elif endpoint_path.startswith("/"):
yield self._get_action_method(
service_domain, endpoint_path, endpoint_data, data
)
def _get_parameters(self, data) -> typing.List[ServiceParameter]:
if isinstance(data, str):
return []
if "uriParameters" in data:
params = []
for key, val in data["uriParameters"].items():
param = ServiceParameter(name=key, type=val, required=True)
params.append(param)
return params
type_name = _get_item_type(data)
if not isinstance(data.get("type"), dict):
return []
value = data["type"][type_name].get("uriParameterName")
if value:
param = ServiceParameter(name=value, type="string", required=True)
return [param]
return []
def _add_metadata(self, method: ServiceMethod, data, parent_data):
if isinstance(data, str):
return method
method.description = data.get("description", "")
if parent_data.get("description"):
method.description += "\n\n" + parent_data["description"].strip()
type_name = _get_item_type(parent_data)
traits = []
# Get traits from base resource
type_data = self._resource_types[type_name]
type_data = type_data.get(method.method) or type_data.get(method.method + "?")
traits.extend(type_data.get("is", []))
# Missing in raml specs?
if method.type == "update":
traits.append("versioned")
# Get params from traits
traits.extend(data.get("is", []))
for trait in traits:
if isinstance(trait, str):
trait_info = self.traits[trait]
else:
trait_key = list(trait.keys())[0]
trait_info = self.traits[trait_key]
method.query_params.extend(trait_info.params)
method.traits.append(trait_info)
# Get params specified on method
params = data.get("queryParameters", {})
if params:
method.extra_params = _parse_query_parameters(params)
method.query_params.extend(method.extra_params)
# Check if this is a file upload (?)
if data.get("body", {}).get("type") == "file":
method.is_fileupload = True
method.query_params.append(
ServiceParameter(name="fh", type="file", required=True, extra_data=None)
)
# De-duplicate param names
deduplicated = {}
for param in method.query_params:
deduplicated[param.name] = param
method.query_params = list(deduplicated.values())
return method
def _parse_query_parameters(params):
result = []
for name, value in params.items():
required = value.get("required", True)
if name.endswith("?"):
name = name[:-1]
required = False
param = ServiceParameter(
name=name,
type=value.get("type", "string"),
required=required,
extra_data=value,
)
result.append(param)
return result
def _get_value(data, *keys):
val = data
for key in keys:
try:
val = val[key]
except (KeyError, TypeError):
return None
return val
def _get_input_type(data, default=None):
    try:
        return data["body"]["application/json"]["type"]
    except (KeyError, TypeError):
        return default
def _get_return_type(data, default=None):
for code in [200, 201]:
try:
return data["responses"][code]["body"]["application/json"]["type"]
except (KeyError, TypeError):
continue
return default
def _get_item_type(data):
if isinstance(data, str):
return "base"
try:
typeval = data.get("type")
if not typeval:
return "base"
if isinstance(typeval, str):
return typeval
return list(typeval.keys())[0]
except (TypeError, KeyError):
pass
return "base"
```
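`_parse_query_parameters` turns a RAML `queryParameters` mapping into `ServiceParameter` objects, with a trailing `?` on the parameter name marking it as optional. A small sketch with made-up parameter data; the import path is assumed from the file location.

```python
from codegen.service_processor import _parse_query_parameters

params = _parse_query_parameters(
    {
        "where": {"type": "string"},
        "limit?": {"type": "number"},
    }
)

# The trailing "?" is stripped from the name and flips `required` to False.
assert [(p.name, p.type, p.required) for p in params] == [
    ("where", "string", True),
    ("limit", "number", False),
]
```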
#### File: codegen/tests/test_utils.py
```python
import pytest
import utils
@pytest.mark.parametrize(
"test_input,test_output",
[("fooBar", "foo_bar"), ("externalOAuth", "external_oauth")],
)
def test_snakeit(test_input, test_output):
assert utils.snakeit(test_input) == test_output
def test_format_docstring():
value = "Returns a customer by its ID from a specific Store. The {storeKey} path parameter maps to a Store's key.\nIt also considers customers that do not have the stores field.\nIf the customer exists in the commercetools project but the stores field references different stores,\nthis method returns a ResourceNotFound error.\n"
newvalue = utils.format_docstring(value)
assert (
newvalue
== """Returns a customer by its ID from a specific Store.
The {storeKey} path parameter maps to a Store's key.
It also considers customers that do not have the stores field.
If the customer exists in the commercetools project but the stores field references different stores,
this method returns a ResourceNotFound error.
"""
)
```
#### File: src/commercetools/client.py
```python
import os
import typing
import urllib.parse
import requests
from marshmallow.base import SchemaABC
from oauthlib.oauth2 import BackendApplicationClient
from requests.adapters import HTTPAdapter
from requests_oauthlib import OAuth2Session
from urllib3.util.retry import Retry
from commercetools._schemas._error import ErrorResponseSchema
from commercetools.constants import HEADER_CORRELATION_ID
from commercetools.exceptions import CommercetoolsError
from commercetools.helpers import _concurrent_retry
from commercetools.services import ServicesMixin
from commercetools.utils import BaseTokenSaver, DefaultTokenSaver, fix_token_url
class RefreshingOAuth2Session(OAuth2Session):
def refresh_token(self, token_url, **kwargs):
kwargs.update(self.auto_refresh_kwargs)
kwargs["scope"] = self.scope
return self.fetch_token(token_url, **kwargs)
class Client(ServicesMixin):
"""The Commercetools Client, used to interact with the Commercetools API.
:param project_key: the key for the project with which you want to interact
:param client_id: the oauth2 client id
:param client_secret: the oauth2 client secret
:param scope: the oauth2 scope. If None then 'manage_project:{project_key}'
:param url: the api endpoint
:param token_url: the oauth2 token url endpoint. This should be the full
path to the token url.
:param token_saver: optional custom token saver to store and retrieve the
oauth2 tokens.
"""
def __init__(
self,
project_key: str = None,
client_id: str = None,
client_secret: str = None,
scope: typing.List[str] = None,
url: str = None,
token_url: str = None,
token_saver: BaseTokenSaver = None,
) -> None:
# Use environment variables as fallback
config = {
"project_key": project_key,
"client_id": client_id,
"client_secret": client_secret,
"url": url,
"token_url": token_url,
"scope": scope,
}
# Make sure we use the config vars
del project_key, client_id, client_secret, url, token_url, scope
self._config = self._read_env_vars(config)
self._config["token_url"] = fix_token_url(self._config["token_url"])
self._token_saver = token_saver or DefaultTokenSaver()
self._url = self._config["url"]
self._base_url = f"{self._config['url']}/{self._config['project_key']}/"
# Fetch token from the token saver
token = self._token_saver.get_token(
self._config["client_id"], self._config["scope"]
)
client = BackendApplicationClient(
client_id=self._config["client_id"], scope=self._config["scope"]
)
self._http_client = RefreshingOAuth2Session(
client=client,
scope=self._config["scope"],
auto_refresh_url=self._config["token_url"],
auto_refresh_kwargs={
"client_id": self._config["client_id"],
"client_secret": self._config["client_secret"],
},
token_updater=self._save_token,
)
# Register retry handling for Connection errors and 502, 503, 504.
retry = Retry(status=3, connect=3, status_forcelist=[502, 503, 504])
adapter = HTTPAdapter(max_retries=retry)
self._http_client.mount("http://", adapter)
self._http_client.mount("https://", adapter)
if token:
self._http_client.token = token
else:
token = self._http_client.fetch_token(
token_url=self._config["token_url"],
scope=self._config["scope"],
client_id=self._config["client_id"],
client_secret=self._config["client_secret"],
)
self._save_token(token)
def _save_token(self, token):
self._token_saver.add_token(
self._config["client_id"], self._config["scope"], token
)
def _get(
self, endpoint: str, params: typing.Dict[str, typing.Any], schema_cls: SchemaABC
) -> typing.Any:
"""Retrieve a single object from the commercetools platform"""
response = self._http_client.get(self._base_url + endpoint, params=params)
if response.status_code == 200:
return schema_cls().load(response.json())
return self._process_error(response)
def _post(
self,
endpoint: str,
params: typing.Dict[str, str],
data_object: typing.Any,
request_schema_cls: SchemaABC,
response_schema_cls: SchemaABC,
form_encoded: bool = False,
force_update: bool = False,
) -> typing.Any:
"""Retrieve a single object from the commercetools platform"""
@_concurrent_retry(3 if force_update else 0)
def remote_http_call(data):
if form_encoded:
kwargs = {"data": data}
else:
kwargs = {"json": data}
if params:
kwargs["params"] = params
response = self._http_client.post(self._base_url + endpoint, **kwargs)
if response.status_code in (200, 201):
return response_schema_cls().load(response.json())
return self._process_error(response)
data = request_schema_cls().dump(data_object)
return remote_http_call(data)
def _upload(
self,
endpoint: str,
params: typing.Dict[str, str],
file: typing.IO,
response_schema_cls: SchemaABC,
) -> typing.Any:
"""Retrieve a single object from the commercetools platform"""
response = self._http_client.post(
self._base_url + endpoint, data=file.read(), params=params
)
if response.status_code in (200, 201):
return response_schema_cls().load(response.json())
return self._process_error(response)
def _delete(
self,
endpoint: str,
params: typing.Dict[str, str],
response_schema_cls: SchemaABC,
force_delete: bool = False,
) -> typing.Any:
"""Delete an object from the commercetools platform"""
@_concurrent_retry(3 if force_delete else 0)
def remote_http_call(data):
response = self._http_client.delete(
self._base_url + endpoint, params=params
)
if response.status_code == 200:
return response_schema_cls().load(response.json())
return self._process_error(response)
return remote_http_call(params)
def _process_error(self, response: requests.Response) -> None:
correlation_id = response.headers.get(HEADER_CORRELATION_ID)
if not response.content:
response.raise_for_status()
obj = ErrorResponseSchema().loads(response.content)
# We'll fetch the 'raw' errors from the response because some of the
# attributes are not included in the schemas.
# With the raw errors in the CommercetoolsError object we can use that
# information later to render more detailed error messages
errors_raw = []
try:
response_json = response.json()
except ValueError:
pass
else:
errors_raw = response_json.get("errors", [])
raise CommercetoolsError(obj.message, errors_raw, obj, correlation_id)
def _read_env_vars(self, config: dict) -> dict:
if not config.get("project_key"):
config["project_key"] = os.environ.get("CTP_PROJECT_KEY")
if not config.get("client_id"):
config["client_id"] = os.environ.get("CTP_CLIENT_ID")
if not config.get("client_secret"):
config["client_secret"] = os.environ.get("CTP_CLIENT_SECRET")
if not config.get("url"):
config["url"] = os.environ.get("CTP_API_URL")
if not config.get("token_url"):
config["token_url"] = os.environ.get("CTP_AUTH_URL")
if not config["scope"]:
config["scope"] = os.environ.get("CTP_SCOPES")
if config["scope"]:
config["scope"] = config["scope"].split(",")
else:
config["scope"] = ["manage_project:%s" % config["project_key"]]
for key, value in config.items():
if value is None:
raise ValueError(f"No value set for {key}")
return config
```
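A minimal construction sketch for the client above; every credential and endpoint shown is a placeholder, and any argument left out falls back to the corresponding `CTP_*` environment variable read in `_read_env_vars`.

```python
from commercetools import Client

client = Client(
    project_key="my-project",                 # placeholder values throughout
    client_id="my-client-id",
    client_secret="my-client-secret",
    scope=["manage_project:my-project"],
    url="https://api.example.commercetools.com",
    token_url="https://auth.example.commercetools.com/oauth/token",
)

# The services mixin exposes one service per endpoint, e.g. the products
# service exercised by the mock-server test further below.
result = client.products.query()
```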
#### File: src/commercetools/exceptions.py
```python
import typing
from commercetools.types import ErrorResponse
class CommercetoolsError(Exception):
response: ErrorResponse
correlation_id: typing.Optional[str]
def __init__(
self,
message: typing.Any,
errors: typing.List[dict],
response: ErrorResponse,
correlation_id: str = None,
) -> None:
super().__init__(message)
self.response = response
self.errors = errors
self.correlation_id = correlation_id
def __str__(self):
result = super().__str__()
if self.details:
return f"{result} ({', '.join(self.details)})"
return result
@property
def details(self) -> typing.List[str]:
return [
e["detailedErrorMessage"]
for e in self.errors
if "detailedErrorMessage" in e
]
@property
def codes(self) -> typing.List[str]:
try:
return [e.code for e in self.response.errors]
except AttributeError:
return []
@property
def code(self) -> str:
"""Convenience property to easily get the error code.
Returns the code of the first error, just as
'message' is always the message of the first error.
"""
try:
return self.codes[0]
        except IndexError:
return ""
```
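A hedged sketch of catching the error around an SDK call and inspecting its extra attributes; the `client` object and the `shopping_lists` accessor name are assumptions used for illustration only.

```python
from commercetools.exceptions import CommercetoolsError

try:
    # Illustrative call only; any SDK request can raise CommercetoolsError.
    client.shopping_lists.get_by_id("non-existing-id")
except CommercetoolsError as exc:
    print(exc)                 # message plus detailedErrorMessage entries, if present
    print(exc.code)            # code of the first error, "" when there is none
    print(exc.correlation_id)  # correlation id header, if the platform returned one
```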
#### File: commercetools/services/messages.py
```python
import typing
from commercetools._schemas._message import (
MessagePagedQueryResponseSchema,
MessageSchema,
)
from commercetools.helpers import RemoveEmptyValuesMixin
from commercetools.types._message import Message, MessagePagedQueryResponse
from commercetools.typing import OptionalListStr
from . import abstract, traits
class _MessageQuerySchema(
traits.ExpandableSchema,
traits.SortableSchema,
traits.PagingSchema,
traits.QuerySchema,
):
pass
class MessageService(abstract.AbstractService):
"""A message represents a change or an action performed on a resource (like an
Order or a Product)."""
def get_by_id(self, id: str, *, expand: OptionalListStr = None) -> Message:
params = self._serialize_params({"expand": expand}, traits.ExpandableSchema)
return self._client._get(
endpoint=f"messages/{id}", params=params, schema_cls=MessageSchema
)
def query(
self,
*,
expand: OptionalListStr = None,
sort: OptionalListStr = None,
limit: int = None,
offset: int = None,
with_total: bool = None,
where: OptionalListStr = None,
predicate_var: typing.Dict[str, str] = None,
) -> MessagePagedQueryResponse:
"""A message represents a change or an action performed on a resource (like
an Order or a Product).
"""
params = self._serialize_params(
{
"expand": expand,
"sort": sort,
"limit": limit,
"offset": offset,
"withTotal": with_total,
"where": where,
"predicate_var": predicate_var,
},
_MessageQuerySchema,
)
return self._client._get(
endpoint="messages",
params=params,
schema_cls=MessagePagedQueryResponseSchema,
)
```
#### File: commercetools/services/shopping_lists.py
```python
import typing
from commercetools._schemas._shopping_list import (
ShoppingListDraftSchema,
ShoppingListPagedQueryResponseSchema,
ShoppingListSchema,
ShoppingListUpdateSchema,
)
from commercetools.helpers import RemoveEmptyValuesMixin
from commercetools.types._shopping_list import (
ShoppingList,
ShoppingListDraft,
ShoppingListPagedQueryResponse,
ShoppingListUpdate,
ShoppingListUpdateAction,
)
from commercetools.typing import OptionalListStr
from . import abstract, traits
class _ShoppingListQuerySchema(
traits.ExpandableSchema,
traits.SortableSchema,
traits.PagingSchema,
traits.QuerySchema,
):
pass
class _ShoppingListUpdateSchema(traits.ExpandableSchema, traits.VersionedSchema):
pass
class _ShoppingListDeleteSchema(
traits.VersionedSchema, traits.ExpandableSchema, traits.DataErasureSchema
):
pass
class ShoppingListService(abstract.AbstractService):
"""shopping-lists e.
g. for wishlist support
"""
def get_by_id(self, id: str, *, expand: OptionalListStr = None) -> ShoppingList:
"""Gets a shopping list by ID."""
params = self._serialize_params({"expand": expand}, traits.ExpandableSchema)
return self._client._get(
endpoint=f"shopping-lists/{id}",
params=params,
schema_cls=ShoppingListSchema,
)
def get_by_key(self, key: str, *, expand: OptionalListStr = None) -> ShoppingList:
"""Gets a shopping list by Key."""
params = self._serialize_params({"expand": expand}, traits.ExpandableSchema)
return self._client._get(
endpoint=f"shopping-lists/key={key}",
params=params,
schema_cls=ShoppingListSchema,
)
def query(
self,
*,
expand: OptionalListStr = None,
sort: OptionalListStr = None,
limit: int = None,
offset: int = None,
with_total: bool = None,
where: OptionalListStr = None,
predicate_var: typing.Dict[str, str] = None,
) -> ShoppingListPagedQueryResponse:
"""shopping-lists e.g. for wishlist support
"""
params = self._serialize_params(
{
"expand": expand,
"sort": sort,
"limit": limit,
"offset": offset,
"withTotal": with_total,
"where": where,
"predicate_var": predicate_var,
},
_ShoppingListQuerySchema,
)
return self._client._get(
endpoint="shopping-lists",
params=params,
schema_cls=ShoppingListPagedQueryResponseSchema,
)
def create(
self, draft: ShoppingListDraft, *, expand: OptionalListStr = None
) -> ShoppingList:
"""shopping-lists e.g. for wishlist support
"""
params = self._serialize_params({"expand": expand}, traits.ExpandableSchema)
return self._client._post(
endpoint="shopping-lists",
params=params,
data_object=draft,
request_schema_cls=ShoppingListDraftSchema,
response_schema_cls=ShoppingListSchema,
)
def update_by_id(
self,
id: str,
version: int,
actions: typing.List[ShoppingListUpdateAction],
*,
expand: OptionalListStr = None,
force_update: bool = False,
) -> ShoppingList:
params = self._serialize_params({"expand": expand}, _ShoppingListUpdateSchema)
update_action = ShoppingListUpdate(version=version, actions=actions)
return self._client._post(
endpoint=f"shopping-lists/{id}",
params=params,
data_object=update_action,
request_schema_cls=ShoppingListUpdateSchema,
response_schema_cls=ShoppingListSchema,
force_update=force_update,
)
def update_by_key(
self,
key: str,
version: int,
actions: typing.List[ShoppingListUpdateAction],
*,
expand: OptionalListStr = None,
force_update: bool = False,
) -> ShoppingList:
"""Update a shopping list found by its Key."""
params = self._serialize_params({"expand": expand}, _ShoppingListUpdateSchema)
update_action = ShoppingListUpdate(version=version, actions=actions)
return self._client._post(
endpoint=f"shopping-lists/key={key}",
params=params,
data_object=update_action,
request_schema_cls=ShoppingListUpdateSchema,
response_schema_cls=ShoppingListSchema,
force_update=force_update,
)
def delete_by_id(
self,
id: str,
version: int,
*,
expand: OptionalListStr = None,
data_erasure: bool = None,
force_delete: bool = False,
) -> ShoppingList:
params = self._serialize_params(
{"version": version, "expand": expand, "dataErasure": data_erasure},
_ShoppingListDeleteSchema,
)
return self._client._delete(
endpoint=f"shopping-lists/{id}",
params=params,
response_schema_cls=ShoppingListSchema,
force_delete=force_delete,
)
def delete_by_key(
self,
key: str,
version: int,
*,
expand: OptionalListStr = None,
data_erasure: bool = None,
force_delete: bool = False,
) -> ShoppingList:
params = self._serialize_params(
{"version": version, "expand": expand, "dataErasure": data_erasure},
_ShoppingListDeleteSchema,
)
return self._client._delete(
endpoint=f"shopping-lists/key={key}",
params=params,
response_schema_cls=ShoppingListSchema,
force_delete=force_delete,
)
```
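A minimal usage sketch for the service above; the `client.shopping_lists` accessor name follows the naming pattern of the other services and is an assumption here, as are the key and the query predicate.

```python
# Assumes `client` is a configured commercetools Client and that ServicesMixin
# exposes this service as `client.shopping_lists`.
wishlist = client.shopping_lists.get_by_key("my-wishlist")

page = client.shopping_lists.query(
    where=['name(en = "My wishlist")'],
    sort=["createdAt desc"],
    limit=20,
)
for shopping_list in page.results:
    print(shopping_list.id, shopping_list.name)
```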
#### File: commercetools/types/_tax_category.py
```python
import datetime
import typing
from commercetools.types._abstract import _BaseType
from commercetools.types._common import (
BaseResource,
Reference,
ReferenceTypeId,
ResourceIdentifier,
)
if typing.TYPE_CHECKING:
from ._common import CreatedBy, LastModifiedBy
__all__ = [
"SubRate",
"TaxCategory",
"TaxCategoryAddTaxRateAction",
"TaxCategoryChangeNameAction",
"TaxCategoryDraft",
"TaxCategoryPagedQueryResponse",
"TaxCategoryReference",
"TaxCategoryRemoveTaxRateAction",
"TaxCategoryReplaceTaxRateAction",
"TaxCategoryResourceIdentifier",
"TaxCategorySetDescriptionAction",
"TaxCategorySetKeyAction",
"TaxCategoryUpdate",
"TaxCategoryUpdateAction",
"TaxRate",
"TaxRateDraft",
]
class SubRate(_BaseType):
#: :class:`str`
name: str
#: :class:`float`
amount: float
def __init__(self, *, name: str = None, amount: float = None) -> None:
self.name = name
self.amount = amount
super().__init__()
def __repr__(self) -> str:
return "SubRate(name=%r, amount=%r)" % (self.name, self.amount)
class TaxCategory(BaseResource):
#: :class:`str`
id: str
#: :class:`int`
version: int
#: :class:`datetime.datetime` `(Named` ``createdAt`` `in Commercetools)`
created_at: datetime.datetime
#: :class:`datetime.datetime` `(Named` ``lastModifiedAt`` `in Commercetools)`
last_modified_at: datetime.datetime
#: Optional :class:`commercetools.types.LastModifiedBy` `(Named` ``lastModifiedBy`` `in Commercetools)`
last_modified_by: typing.Optional["LastModifiedBy"]
#: Optional :class:`commercetools.types.CreatedBy` `(Named` ``createdBy`` `in Commercetools)`
created_by: typing.Optional["CreatedBy"]
#: :class:`str`
name: str
#: Optional :class:`str`
description: typing.Optional[str]
#: List of :class:`commercetools.types.TaxRate`
rates: typing.List["TaxRate"]
#: Optional :class:`str`
key: typing.Optional[str]
def __init__(
self,
*,
id: str = None,
version: int = None,
created_at: datetime.datetime = None,
last_modified_at: datetime.datetime = None,
last_modified_by: typing.Optional["LastModifiedBy"] = None,
created_by: typing.Optional["CreatedBy"] = None,
name: str = None,
description: typing.Optional[str] = None,
rates: typing.List["TaxRate"] = None,
key: typing.Optional[str] = None
) -> None:
self.id = id
self.version = version
self.created_at = created_at
self.last_modified_at = last_modified_at
self.last_modified_by = last_modified_by
self.created_by = created_by
self.name = name
self.description = description
self.rates = rates
self.key = key
super().__init__(
id=id,
version=version,
created_at=created_at,
last_modified_at=last_modified_at,
)
def __repr__(self) -> str:
return (
"TaxCategory(id=%r, version=%r, created_at=%r, last_modified_at=%r, last_modified_by=%r, created_by=%r, name=%r, description=%r, rates=%r, key=%r)"
% (
self.id,
self.version,
self.created_at,
self.last_modified_at,
self.last_modified_by,
self.created_by,
self.name,
self.description,
self.rates,
self.key,
)
)
class TaxCategoryDraft(_BaseType):
#: :class:`str`
name: str
#: Optional :class:`str`
description: typing.Optional[str]
#: List of :class:`commercetools.types.TaxRateDraft`
rates: typing.List["TaxRateDraft"]
#: Optional :class:`str`
key: typing.Optional[str]
def __init__(
self,
*,
name: str = None,
description: typing.Optional[str] = None,
rates: typing.List["TaxRateDraft"] = None,
key: typing.Optional[str] = None
) -> None:
self.name = name
self.description = description
self.rates = rates
self.key = key
super().__init__()
def __repr__(self) -> str:
return "TaxCategoryDraft(name=%r, description=%r, rates=%r, key=%r)" % (
self.name,
self.description,
self.rates,
self.key,
)
class TaxCategoryPagedQueryResponse(_BaseType):
#: :class:`int`
limit: int
#: :class:`int`
count: int
#: Optional :class:`int`
total: typing.Optional[int]
#: :class:`int`
offset: int
#: List of :class:`commercetools.types.TaxCategory`
results: typing.Sequence["TaxCategory"]
def __init__(
self,
*,
limit: int = None,
count: int = None,
total: typing.Optional[int] = None,
offset: int = None,
results: typing.Sequence["TaxCategory"] = None
) -> None:
self.limit = limit
self.count = count
self.total = total
self.offset = offset
self.results = results
super().__init__()
def __repr__(self) -> str:
return (
"TaxCategoryPagedQueryResponse(limit=%r, count=%r, total=%r, offset=%r, results=%r)"
% (self.limit, self.count, self.total, self.offset, self.results)
)
class TaxCategoryReference(Reference):
#: Optional :class:`commercetools.types.TaxCategory`
obj: typing.Optional["TaxCategory"]
def __init__(
self,
*,
type_id: "ReferenceTypeId" = None,
id: str = None,
obj: typing.Optional["TaxCategory"] = None
) -> None:
self.obj = obj
super().__init__(type_id=ReferenceTypeId.TAX_CATEGORY, id=id)
def __repr__(self) -> str:
return "TaxCategoryReference(type_id=%r, id=%r, obj=%r)" % (
self.type_id,
self.id,
self.obj,
)
class TaxCategoryResourceIdentifier(ResourceIdentifier):
def __init__(
self,
*,
type_id: typing.Optional["ReferenceTypeId"] = None,
id: typing.Optional[str] = None,
key: typing.Optional[str] = None
) -> None:
super().__init__(type_id=ReferenceTypeId.TAX_CATEGORY, id=id, key=key)
def __repr__(self) -> str:
return "TaxCategoryResourceIdentifier(type_id=%r, id=%r, key=%r)" % (
self.type_id,
self.id,
self.key,
)
class TaxCategoryUpdate(_BaseType):
#: :class:`int`
version: int
#: :class:`list`
actions: list
def __init__(self, *, version: int = None, actions: list = None) -> None:
self.version = version
self.actions = actions
super().__init__()
def __repr__(self) -> str:
return "TaxCategoryUpdate(version=%r, actions=%r)" % (
self.version,
self.actions,
)
class TaxCategoryUpdateAction(_BaseType):
#: :class:`str`
action: str
def __init__(self, *, action: str = None) -> None:
self.action = action
super().__init__()
def __repr__(self) -> str:
return "TaxCategoryUpdateAction(action=%r)" % (self.action,)
class TaxRate(_BaseType):
#: Optional :class:`str`
id: typing.Optional[str]
#: :class:`str`
name: str
#: :class:`float`
amount: float
#: :class:`bool` `(Named` ``includedInPrice`` `in Commercetools)`
included_in_price: bool
#: :class:`str`
country: "str"
#: Optional :class:`str`
state: typing.Optional[str]
#: Optional list of :class:`commercetools.types.SubRate` `(Named` ``subRates`` `in Commercetools)`
sub_rates: typing.Optional[typing.List["SubRate"]]
def __init__(
self,
*,
id: typing.Optional[str] = None,
name: str = None,
amount: float = None,
included_in_price: bool = None,
country: "str" = None,
state: typing.Optional[str] = None,
sub_rates: typing.Optional[typing.List["SubRate"]] = None
) -> None:
self.id = id
self.name = name
self.amount = amount
self.included_in_price = included_in_price
self.country = country
self.state = state
self.sub_rates = sub_rates
super().__init__()
def __repr__(self) -> str:
return (
"TaxRate(id=%r, name=%r, amount=%r, included_in_price=%r, country=%r, state=%r, sub_rates=%r)"
% (
self.id,
self.name,
self.amount,
self.included_in_price,
self.country,
self.state,
self.sub_rates,
)
)
class TaxRateDraft(_BaseType):
#: :class:`str`
name: str
#: Optional :class:`float`
amount: typing.Optional[float]
#: :class:`bool` `(Named` ``includedInPrice`` `in Commercetools)`
included_in_price: bool
#: :class:`str`
country: "str"
#: Optional :class:`str`
state: typing.Optional[str]
#: Optional list of :class:`commercetools.types.SubRate` `(Named` ``subRates`` `in Commercetools)`
sub_rates: typing.Optional[typing.List["SubRate"]]
def __init__(
self,
*,
name: str = None,
amount: typing.Optional[float] = None,
included_in_price: bool = None,
country: "str" = None,
state: typing.Optional[str] = None,
sub_rates: typing.Optional[typing.List["SubRate"]] = None
) -> None:
self.name = name
self.amount = amount
self.included_in_price = included_in_price
self.country = country
self.state = state
self.sub_rates = sub_rates
super().__init__()
def __repr__(self) -> str:
return (
"TaxRateDraft(name=%r, amount=%r, included_in_price=%r, country=%r, state=%r, sub_rates=%r)"
% (
self.name,
self.amount,
self.included_in_price,
self.country,
self.state,
self.sub_rates,
)
)
class TaxCategoryAddTaxRateAction(TaxCategoryUpdateAction):
#: :class:`commercetools.types.TaxRateDraft` `(Named` ``taxRate`` `in Commercetools)`
tax_rate: "TaxRateDraft"
def __init__(self, *, action: str = None, tax_rate: "TaxRateDraft" = None) -> None:
self.tax_rate = tax_rate
super().__init__(action="addTaxRate")
def __repr__(self) -> str:
return "TaxCategoryAddTaxRateAction(action=%r, tax_rate=%r)" % (
self.action,
self.tax_rate,
)
class TaxCategoryChangeNameAction(TaxCategoryUpdateAction):
#: :class:`str`
name: str
def __init__(self, *, action: str = None, name: str = None) -> None:
self.name = name
super().__init__(action="changeName")
def __repr__(self) -> str:
return "TaxCategoryChangeNameAction(action=%r, name=%r)" % (
self.action,
self.name,
)
class TaxCategoryRemoveTaxRateAction(TaxCategoryUpdateAction):
#: :class:`str` `(Named` ``taxRateId`` `in Commercetools)`
tax_rate_id: str
def __init__(self, *, action: str = None, tax_rate_id: str = None) -> None:
self.tax_rate_id = tax_rate_id
super().__init__(action="removeTaxRate")
def __repr__(self) -> str:
return "TaxCategoryRemoveTaxRateAction(action=%r, tax_rate_id=%r)" % (
self.action,
self.tax_rate_id,
)
class TaxCategoryReplaceTaxRateAction(TaxCategoryUpdateAction):
#: :class:`str` `(Named` ``taxRateId`` `in Commercetools)`
tax_rate_id: str
#: :class:`commercetools.types.TaxRateDraft` `(Named` ``taxRate`` `in Commercetools)`
tax_rate: "TaxRateDraft"
def __init__(
self,
*,
action: str = None,
tax_rate_id: str = None,
tax_rate: "TaxRateDraft" = None
) -> None:
self.tax_rate_id = tax_rate_id
self.tax_rate = tax_rate
super().__init__(action="replaceTaxRate")
def __repr__(self) -> str:
return (
"TaxCategoryReplaceTaxRateAction(action=%r, tax_rate_id=%r, tax_rate=%r)"
% (self.action, self.tax_rate_id, self.tax_rate)
)
class TaxCategorySetDescriptionAction(TaxCategoryUpdateAction):
#: Optional :class:`str`
description: typing.Optional[str]
def __init__(
self, *, action: str = None, description: typing.Optional[str] = None
) -> None:
self.description = description
super().__init__(action="setDescription")
def __repr__(self) -> str:
return "TaxCategorySetDescriptionAction(action=%r, description=%r)" % (
self.action,
self.description,
)
class TaxCategorySetKeyAction(TaxCategoryUpdateAction):
#: Optional :class:`str`
key: typing.Optional[str]
def __init__(self, *, action: str = None, key: typing.Optional[str] = None) -> None:
self.key = key
super().__init__(action="setKey")
def __repr__(self) -> str:
return "TaxCategorySetKeyAction(action=%r, key=%r)" % (self.action, self.key)
```
#### File: src/commercetools/utils.py
```python
import threading
import urllib.parse
tls = threading.local()
class BaseTokenSaver:
def get_token(self, client_id, scopes):
raise NotImplementedError()
def add_token(self, client_id, scopes, token):
raise NotImplementedError()
def _create_token_hash(self, client_id, scopes):
assert scopes is not None
return "%s:%s" % (client_id, ";".join(scopes))
class DefaultTokenSaver(BaseTokenSaver):
@property
def storage(self):
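        """Thread-local dict used to cache tokens, keyed on client id and scopes."""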
items = getattr(tls, "tokens", None)
if items is None:
items = {}
setattr(tls, "tokens", items)
return items
def add_token(self, client_id, scopes, token):
name = self._create_token_hash(client_id, scopes)
self.storage[name] = token
def get_token(self, client_id, scopes):
name = self._create_token_hash(client_id, scopes)
return self.storage.get(name)
@classmethod
def clear_cache(cls):
items = getattr(tls, "tokens", {})
items.clear()
def fix_token_url(token_url: str) -> str:
"""
Ensure the token url has the right format.
Often clients only pass the base url instead of the complete
token url, which gets confusing for users.
"""
parts = urllib.parse.urlparse(token_url)
if parts.path == "":
token_url = urllib.parse.urlunparse((*parts[:2], "/oauth/token", *parts[3:]))
return token_url
```
#### File: commercetools-python-sdk/tests/test_mock_server.py
```python
import os
import requests
from commercetools import Client
from commercetools.types import (
ChannelDraft,
ChannelResourceIdentifier,
ChannelRoleEnum,
LocalizedString,
ProductDraft,
StoreDraft,
)
def test_http_server(commercetools_client, commercetools_http_server):
os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"
client = Client(
project_key="unittest",
client_id="client-id",
client_secret="client-secret",
scope=[],
url=commercetools_http_server.api_url,
token_url=f"{commercetools_http_server.api_url}/oauth/token",
)
query_result = client.products.query()
assert query_result.count == 0
product = client.products.create(ProductDraft(name=LocalizedString(nl="Testje")))
client.products.get_by_id(product.id)
url = commercetools_http_server.api_url + f"/unittest/products/{product.id}"
response = requests.get(url, headers={"Authorization": "Bearer token"})
assert response.status_code == 200, response.text
data = response.json()
assert data["masterData"]["staged"]["name"]["nl"] == "Testje"
def test_http_server_expanding(commercetools_client, commercetools_http_server):
os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"
client = Client(
project_key="unittest",
client_id="client-id",
client_secret="client-secret",
scope=[],
url=commercetools_http_server.api_url,
token_url=f"{commercetools_http_server.api_url}/oauth/token",
)
client.channels.create(
ChannelDraft(key="FOO", roles=[ChannelRoleEnum.PRODUCT_DISTRIBUTION])
)
store = client.stores.create(
StoreDraft(
key="FOO", distribution_channels=[ChannelResourceIdentifier(key="FOO")]
)
)
url = commercetools_http_server.api_url + f"/unittest/stores/{store.id}"
response = requests.get(
url,
params={"expand": "distributionChannels[*]"},
headers={"Authorization": "Bearer token"},
)
assert response.status_code == 200, response.text
data = response.json()
assert data["distributionChannels"][0]["obj"]["key"] == "FOO"
```
#### File: commercetools-python-sdk/tests/test_service_custom_objects.py
```python
import pytest
from requests.exceptions import HTTPError
from commercetools import types
def test_custom_object_get_by_id(client):
custom_object = client.custom_objects.create_or_update(
types.CustomObjectDraft(container="unittest", key="test-object", value=1234)
)
assert custom_object.id
assert custom_object.container == "unittest"
assert custom_object.key == "test-object"
assert custom_object.value == 1234
custom_object = client.custom_objects.get_by_id(custom_object.id)
assert custom_object.container == "unittest"
assert custom_object.key == "test-object"
assert custom_object.value == 1234
with pytest.raises(HTTPError):
client.custom_objects.get_by_id("invalid")
def test_custom_object_query(client):
client.custom_objects.create_or_update(
types.CustomObjectDraft(container="unittest", key="test-object-1", value=1234)
)
client.custom_objects.create_or_update(
types.CustomObjectDraft(container="unittest", key="test-object-2", value=1234)
)
# single sort query
result = client.custom_objects.query(sort="id asc")
assert len(result.results) == 2
assert result.total == 2
# multiple sort queries
result = client.custom_objects.query(sort=["id asc", "name asc"])
assert len(result.results) == 2
assert result.total == 2
def test_custom_object_update(client):
"""Test the return value of the update methods.
It doesn't test the actual update itself.
TODO: See if this is worth testing since we're using a mocking backend
"""
custom_object = client.custom_objects.create_or_update(
types.CustomObjectDraft(container="unittest", key="test-object-1", value=1234)
)
assert custom_object.key == "test-object-1"
custom_object = client.custom_objects.create_or_update(
types.CustomObjectDraft(container="unittest", key="test-object-1", value=2345)
)
assert custom_object.key == "test-object-1"
```
#### File: commercetools-python-sdk/tests/test_service_extensions.py
```python
from commercetools import types
def test_extension_create(client):
extension = client.extensions.create(types.ExtensionDraft())
assert extension.id
def test_extension_get_by_id(client):
extension = client.extensions.create(types.ExtensionDraft())
assert extension.id
extension = client.extensions.get_by_id(extension.id)
assert extension.id
```
#### File: commercetools-python-sdk/tests/test_service_payment.py
```python
from commercetools import types
def test_payments_get_by_id(client):
payment = client.payments.create(
types.PaymentDraft(
key="test-payment",
amount_planned=types.Money(cent_amount=2000, currency_code="GBP"),
payment_method_info=types.PaymentMethodInfo(
payment_interface="ADYEN", method="mc"
),
transactions=[
types.TransactionDraft(
type=types.TransactionType.CHARGE,
amount=types.Money(cent_amount=2000, currency_code="GBP"),
interaction_id="8525483242578266",
state=types.TransactionState.SUCCESS,
)
],
interface_interactions=[
types.CustomFieldsDraft(
fields=types.FieldContainer(
{
"operations": "CANCEL,CAPTURE,REFUND",
"success": True,
"psp_reference": "8525483242578266",
"merchant_reference": "some reference",
"reason": "82132:0005:10/2020",
"amount": 2000,
"payment_method": "mc",
"event_date": "2019-01-24T11:04:17.000000Z",
"currency_code": "GBP",
"event_code": "AUTHORISATION",
"merchant_account_code": "TestMerchant",
}
)
)
],
)
)
assert payment.id
assert payment.key == "test-payment"
def test_update_actions(client):
payment = client.payments.create(
types.PaymentDraft(
key="test-payment",
amount_planned=types.Money(cent_amount=2000, currency_code="GBP"),
payment_method_info=types.PaymentMethodInfo(
payment_interface="ADYEN", method="mc"
),
transactions=[
types.TransactionDraft(
type=types.TransactionType.CHARGE,
amount=types.Money(cent_amount=2000, currency_code="GBP"),
state=types.TransactionState.PENDING,
)
],
)
)
existing_transaction = payment.transactions[0]
payment = client.payments.update_by_id(
payment.id,
payment.version,
actions=[
types.PaymentAddInterfaceInteractionAction(
fields=types.FieldContainer({"pspRef": "1337"})
),
types.PaymentChangeTransactionInteractionIdAction(
transaction_id=existing_transaction.id, interaction_id="1337"
),
types.PaymentAddTransactionAction(
transaction=types.TransactionDraft(
type=types.TransactionType.CHARGE,
amount=types.Money(currency_code="GBP", cent_amount=1000),
interaction_id="123",
state=types.TransactionState.INITIAL,
)
),
types.PaymentChangeTransactionStateAction(
transaction_id=existing_transaction.id,
state=types.TransactionState.SUCCESS,
),
],
)
assert payment.interface_interactions[0].fields == {"pspRef": "1337"}
assert payment.transactions[0].interaction_id == "1337"
assert len(payment.transactions) == 2
assert payment.transactions[0].state == types.TransactionState.SUCCESS
```
#### File: commercetools-python-sdk/tests/test_utils.py
```python
import pytest
from commercetools.utils import fix_token_url
@pytest.mark.parametrize(
"token_url,expected_url",
[
("https://auth.sphere.io", "https://auth.sphere.io/oauth/token"),
("https://auth.sphere.io/oauth/token", "https://auth.sphere.io/oauth/token"),
("https://auth.commercetools.co", "https://auth.commercetools.co/oauth/token"),
(
"https://auth.sphere.io?test=123",
"https://auth.sphere.io/oauth/token?test=123",
),
],
)
def test_fix_token_url(token_url, expected_url):
assert fix_token_url(token_url) == expected_url
```
|
{
"source": "jeroenubbink/mach-composer",
"score": 2
}
|
#### File: src/mach/git.py
```python
import os
import re
import subprocess
from dataclasses import dataclass
from typing import List, Optional
import click
from mach import exceptions
PRETTY_FMT = {
"commit": "%H",
"author": "%aN <%aE>",
"date": "%ad",
"message": "%s",
}
PRETTY_FMT_STR = "format:" + "|".join([fmt for fmt in PRETTY_FMT.values()])
class GitError(exceptions.MachError):
pass
@dataclass
class Commit:
id: str
msg: str
def commit(message: str):
result = _run(["git", "status", "--short"])
if not result:
click.echo("No changes detected, won't commit anything")
return
_run(["git", "commit", "-m", message])
def add(file: str):
_run(["git", "add", file])
def ensure_local(repo: str, dest: str):
"""Ensure the repository is present on the given dest."""
reference = ""
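    # A repository location may end with an optional "@<reference>" suffix
    # (branch, tag or commit); it is split off here and applied with
    # `git reset --hard` below.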
reference_match = re.match(r"(.*)(?:(?:@)(\w+))$", repo)
if reference_match:
repo, reference = reference_match.groups()
if os.path.exists(dest):
_run(["git", "pull"], cwd=dest)
else:
clone(repo, dest)
if reference:
try:
_run(["git", "reset", "--hard", reference], cwd=dest)
except GitError as e:
raise GitError(f"Unable to swtich to reference {reference}: {e}")
def clone(repo: str, dest: str):
_run(["git", "clone", repo, dest])
def history(dir: str, from_ref: str, *, branch: Optional[str] = "") -> List[Commit]:
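    """Return the commits on `branch` since `from_ref` (all commits if no ref is given), parsed from `git log` output."""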
if branch:
_run(["git", "checkout", branch], cwd=dir)
cmd = ["git", "log", f"--pretty={PRETTY_FMT_STR}"]
if from_ref:
cmd.append(f"{from_ref}..{branch or ''}")
lines = _run(cmd, cwd=dir).decode("utf-8").splitlines()
commits = []
for line in lines:
commit_id, author, date, message = line.split("|", 3)
commits.append(
Commit(id=_clean_commit_id(commit_id), msg=_clean_commit_msg(message))
)
return commits
def _clean_commit_msg(msg: str) -> str:
return msg
def _clean_commit_id(commit_id: str) -> str:
"""Get the correct commit ID for this commit.
It will trim the short_id since mach and the components are using a
different commit id format (7 chars long).
"""
return commit_id[:7]
def _run(cmd: List, *args, **kwargs) -> bytes:
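    """Run a command, folding stderr into stdout; raise GitError if it exits non-zero."""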
kwargs["stderr"] = subprocess.STDOUT
try:
return subprocess.check_output(cmd, *args, **kwargs)
except subprocess.CalledProcessError as e:
raise GitError(e.output.decode() if e.output else str(e)) from e
```
#### File: mach/types/base.py
```python
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, List, Optional
from dataclasses_json import config, dataclass_json
from dataclasses_jsonschema import JsonSchemaMixin
from . import fields
from .components import ComponentConfig
from .general_config import GlobalConfig
from .mach import MachComposerConfig
from .sites import Site
__all__ = ["MachConfig"]
@dataclass_json
@dataclass
class MachConfig(JsonSchemaMixin):
"""Main MACH configuration object."""
mach_composer: MachComposerConfig
general_config: GlobalConfig = field(metadata=config(field_name="global"))
sites: List[Site]
components: List[ComponentConfig] = fields.list_()
# Items that are not used in the configuration itself by set by the parser
output_path: str = "deployments"
file: Optional[str] = fields.none()
# Indicates that the config file is SOPS encrypted
file_encrypted: bool = False
variables: Dict[str, Any] = fields.dict_()
variables_path: str = fields.none()
variables_encrypted: bool = False
@property
def deployment_path(self) -> Path:
return Path(self.output_path)
def deployment_path_for(self, site: Site):
return self.deployment_path / Path(site.identifier)
def get_component(self, name: str) -> Optional[ComponentConfig]:
for comp in self.components:
if comp.name == name:
return comp
return None
```
#### File: tests/unittests/test_variables.py
```python
import pytest
from mach import variables
def test_resolve_variables():
vars = {
"my-value": "foo",
"secrets": {"site1": {"my-value": "bar"}},
"list": ["one", "two", {"nested-key": "three"}],
}
variables.resolve_variable("my-value", vars) == "foo"
with pytest.raises(variables.VariableNotFound):
variables.resolve_variable("my-other-value", vars)
variables.resolve_variable("secrets.site1.my-value", vars) == "bar"
with pytest.raises(variables.VariableNotFound):
variables.resolve_variable("secrets.site2.my-value", vars)
variables.resolve_variable("list.0", vars) == "one"
variables.resolve_variable("list.1", vars) == "two"
variables.resolve_variable("list.2", vars) == {"nested-key": "three"}
variables.resolve_variable("list.2.nested-key", vars) == "three"
with pytest.raises(variables.VariableNotFound):
variables.resolve_variable("my-value.string-attribute", vars)
```
|
{
"source": "jeroenubbink/syncthing2piwigo",
"score": 2
}
|
#### File: syncthing2piwigo/syncthing/event.py
```python
from typing import cast
from marshmallow import Schema, fields, EXCLUDE
class EventBaseSchema(Schema):
...
# FIXME: unsure how this works, should probably do something with timezone
# accept we have a dict for the nested schema right now.
class EventDateTimeField(fields.DateTime):
def _to_iso_format(self, syncthing_event_time: str) -> str:
# look at the crap we have to deal with...
# "2014-07-13T21:22:03.414609034+02:00"
# first transform nano_seconds into micro_seconds
_nano_seconds = syncthing_event_time.split(".")[1][:-6]
_micro_seconds = _nano_seconds[:6]
# correct tz
_syncthing_tz = syncthing_event_time[-6:]
_iso_format_tz = _syncthing_tz.replace(":", "")
_fixed_nano_seconds = syncthing_event_time.replace(_nano_seconds, _micro_seconds)
iso_format_string = _fixed_nano_seconds.replace(_syncthing_tz, _iso_format_tz)
return iso_format_string
def _to_syncthing_format(self, iso_format_time: str) -> str:
# let's add the crap and forget about the lost nano seconds
_micro_seconds = iso_format_time.split(".")[1][:-5]
_nano_seconds = f"{_micro_seconds}000"
_iso_format_tz = iso_format_time[-5:]
_syncthing_tz = f"{_iso_format_tz[:-2]}:{_iso_format_tz[-2:]}"
_fixed_micro_seconds = iso_format_time.replace(_micro_seconds, _nano_seconds)
syncthing_format_string = _fixed_micro_seconds.replace(_iso_format_tz, _syncthing_tz)
return syncthing_format_string
def _deserialize(self, value, attr, data, **kwargs):
print(f"deserialize value: {value}")
event_time: str = cast(str, value)
value_iso_format = self._to_iso_format(syncthing_event_time=event_time)
        return super()._deserialize(value_iso_format, attr, data, **kwargs)
def _serialize(self, value, attr, obj, **kwargs):
print(f"serialize value: {value}")
iso_time: str = super()._serialize(value, attr, obj, **kwargs)
value_syncthing_format = self._to_syncthing_format(iso_format_time=iso_time)
return value_syncthing_format
class EventDataSchema(EventBaseSchema):
item = fields.Str()
folder = fields.Str()
error = fields.Str(required=False, allow_none=True)
type = fields.Str()
action = fields.Str()
class Meta(EventBaseSchema.Meta):
unknown = EXCLUDE
class EventSchema(EventBaseSchema):
id = fields.Int()
globalID = fields.Int()
type = fields.Str()
time = EventDateTimeField()
data = fields.Nested(EventDataSchema(partial=True))
```
|
{
"source": "jeroenvanbaar/ReciprocityMotives",
"score": 2
}
|
#### File: Code/Functions/costFunctions.py
```python
import numpy as np
import pandas as pd
import choiceModels
def MP_costfun(param,subDat,printStep=False,printPredictions=False,resid_share=False):
theta = param[0]
phi = param[1]
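    # Predict each trial's return with the MP choice model, then return the
    # per-trial residuals (observed - predicted). The SSE below is only used
    # for the optional progress printout.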
for trial in range(subDat.shape[0]):
subDat.loc[trial,'prediction'] = choiceModels.MP_model(
subDat.loc[trial,'inv'],
subDat.loc[trial,'mult'],
subDat.loc[trial,'baseMult'],
subDat.loc[trial,'exp'],
theta, phi)
if resid_share == True:
residuals = (subDat.loc[:,'ret'] - subDat.loc[:,'prediction'])/(subDat.loc[:,'inv'] * subDat.loc[:,'mult'])
else:
residuals = subDat.loc[:,'ret'] - subDat.loc[:,'prediction']
residuals = residuals.astype('float')
SSE = np.sum(np.square(residuals))
if printStep==True:
print('theta = %.2f, phi = %.2f, SSE = %.2f'%(theta,phi,SSE))
if printPredictions == True:
print(subDat)
return residuals
def MP_costfun_ppSOE(param,subDat,printStep=False,printPredictions=False,resid_share=False):
theta = param[0]
phi = param[1]
for trial in range(subDat.shape[0]):
subDat.loc[trial,'prediction'] = choiceModels.MP_model_ppSOE(
subDat.loc[trial,'inv'],
subDat.loc[trial,'mult'],
subDat.loc[trial,'baseMult'],
subDat.loc[trial,'exp'],
theta, phi)
if resid_share == True:
residuals = (subDat.loc[:,'ret'] - subDat.loc[:,'prediction'])/(subDat.loc[:,'inv'] * subDat.loc[:,'mult'])
else:
residuals = subDat.loc[:,'ret'] - subDat.loc[:,'prediction']
residuals = residuals.astype('float')
SSE = np.sum(np.square(residuals))
if printStep==True:
print('theta = %.2f, phi = %.2f, SSE = %.2f'%(theta,phi,SSE))
if printPredictions == True:
print(subDat)
return residuals
def IA_costfun(theta,subDat,printStep=False,printPredictions=False,resid_share=False):
for trial in range(subDat.shape[0]):
subDat.loc[trial,'prediction'] = choiceModels.IA_model(
subDat.loc[trial,'inv'],
subDat.loc[trial,'mult'],
theta)
if resid_share == True:
residuals = (subDat.loc[:,'ret'] - subDat.loc[:,'prediction'])/(subDat.loc[:,'inv'] * subDat.loc[:,'mult'])
else:
residuals = subDat.loc[:,'ret'] - subDat.loc[:,'prediction']
residuals = residuals.astype('float')
SSE = np.sum(np.square(residuals))
if printStep==True:
print('theta = %.2f, SSE = %.2f'%(theta,SSE))
if printPredictions == True:
print(subDat)
return residuals
def GA_costfun(theta,subDat,printStep=False,printPredictions=False,resid_share=False):
for trial in range(subDat.shape[0]):
subDat.loc[trial,'prediction'] = choiceModels.GA_model(
subDat.loc[trial,'inv'],
subDat.loc[trial,'mult'],
subDat.loc[trial,'baseMult'],
subDat.loc[trial,'exp'],
theta)
if resid_share == True:
residuals = (subDat.loc[:,'ret'] - subDat.loc[:,'prediction'])/(subDat.loc[:,'inv'] * subDat.loc[:,'mult'])
else:
residuals = subDat.loc[:,'ret'] - subDat.loc[:,'prediction']
residuals = residuals.astype('float')
SSE = np.sum(np.square(residuals))
if printStep==True:
print('theta = %.2f, SSE = %.2f'%(theta,SSE))
if printPredictions == True:
print(subDat)
return residuals
def GR_costfun(subDat,printPredictions=False,resid_share=False):
for trial in range(subDat.shape[0]):
subDat.loc[trial,'prediction'] = choiceModels.GR_model()
if resid_share == True:
residuals = (subDat.loc[:,'ret'] - subDat.loc[:,'prediction'])/(subDat.loc[:,'inv'] * subDat.loc[:,'mult'])
else:
residuals = subDat.loc[:,'ret'] - subDat.loc[:,'prediction']
residuals = residuals.astype('float')
SSE = np.sum(np.square(residuals))
if printPredictions == True:
print(subDat)
return residuals
```
|
{
"source": "JeroenvdSande/dash-sample-apps",
"score": 3
}
|
#### File: dash-3d-image-partitioning/plotly-common/app_utils.py
```python
from os import environ
def get_env(name, default=None, conv=None, check_if_none=False):
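    """Read environment variable `name`, falling back to `default`; optionally
    raise when the value is missing (check_if_none) and convert it with `conv`."""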
try:
ret = environ[name]
except KeyError:
ret = default
if check_if_none and ret is None:
raise Exception("Specify " + name + ".")
return ret
if conv is not None:
return conv(ret)
return ret
```
#### File: apps/dash-airfoil-design/app.py
```python
import plotly.express as px
import plotly.graph_objects as go
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State
import aerosandbox as asb
import aerosandbox.numpy as np
import copy
import plotly.figure_factory as ff
import pandas as pd
from app_components import *
### Build the app
app = dash.Dash(
__name__, external_stylesheets=[dbc.themes.MINTY], title="Airfoil Analysis"
)
server = app.server
app.layout = dbc.Container(
[
dbc.Row(
[
dbc.Col(
[
dcc.Markdown(
"""
# Airfoil Analysis with [AeroSandbox](https://github.com/peterdsharpe/AeroSandbox) and [Dash](https://plotly.com/dash/)
By [<NAME>](https://peterdsharpe.github.io/). Uses potential flow theory (viscous effects neglected, for now). [Source code here](https://github.com/peterdsharpe/Automotive-Airfoil-Design).
"""
)
],
width=True,
),
dbc.Col(
[
html.Img(
src="assets/MIT-logo-red-gray-72x38.svg",
alt="MIT Logo",
height="30px",
),
],
width=1,
),
],
align="end",
),
html.Hr(),
dbc.Row(
[
dbc.Col(
[
dbc.Button(
"Modify Operating Conditions", id="operating_button"
),
dbc.Collapse(
dbc.Card(dbc.CardBody(operating_slider_components,)),
id="operating_collapse",
is_open=False,
),
html.Hr(),
dbc.Button(
"Modify Shape Parameters (Kulfan)", id="shape_button"
),
dbc.Collapse(
dbc.Card(dbc.CardBody(kulfan_slider_components,)),
id="shape_collapse",
is_open=False,
),
html.Hr(),
dbc.Button(
"Show Raw Coordinates (*.dat format)",
id="coordinates_button",
),
dbc.Collapse(
dbc.Card(
dbc.CardBody(dcc.Markdown(id="coordinates_output"))
),
id="coordinates_collapse",
is_open=False,
),
html.Hr(),
dcc.Markdown("##### Commands"),
dbc.Button(
"Analyze",
id="analyze",
color="primary",
style={"margin": "5px"},
),
html.Hr(),
dcc.Markdown("##### Aerodynamic Performance"),
dbc.Spinner(html.P(id="text_output"), color="primary",),
],
width=3,
),
dbc.Col(
[dcc.Graph(id="display", style={"height": "90vh"}),],
width=9,
align="start",
),
]
),
html.Hr(),
dcc.Markdown(
"""
Aircraft design tools powered by [AeroSandbox](https://github.com/peterdsharpe/AeroSandbox). Build beautiful UIs for your scientific computing apps with [Plot.ly](https://plotly.com/) and [Dash](https://plotly.com/dash/)!
"""
),
],
fluid=True,
)
### Callback to make shape parameters menu expand
@app.callback(
Output("shape_collapse", "is_open"),
[Input("shape_button", "n_clicks")],
[State("shape_collapse", "is_open")],
)
def toggle_shape_collapse(n_clicks, is_open):
if n_clicks:
return not is_open
return is_open
### Callback to make operating parameters menu expand
@app.callback(
Output("operating_collapse", "is_open"),
[Input("operating_button", "n_clicks")],
[State("operating_collapse", "is_open")],
)
def toggle_operating_collapse(n_clicks, is_open):
if n_clicks:
return not is_open
return is_open
### Callback to make coordinates menu expand
@app.callback(
Output("coordinates_collapse", "is_open"),
[Input("coordinates_button", "n_clicks")],
[State("coordinates_collapse", "is_open")],
)
def toggle_coordinates_collapse(n_clicks, is_open):
if n_clicks:
return not is_open
return is_open
### Callback to make operating sliders display proper values
@app.callback(
Output("alpha_slider_output", "children"),
[Input("alpha_slider_input", "drag_value")],
)
def display_alpha_slider(drag_value):
return f"Angle of Attack: {drag_value}"
@app.callback(
Output("height_slider_output", "children"),
[Input("height_slider_input", "drag_value")],
)
def display_height_slider(drag_value):
return f"Height: {drag_value}"
@app.callback(
Output("streamline_density_slider_output", "children"),
[Input("streamline_density_slider_input", "drag_value")],
)
def display_streamline_density_slider(drag_value):
return f"Streamline Density: {drag_value}"
### The callback to make the kulfan sliders display proper values
for side in sides:
for i in range(n_kulfan_inputs_per_side):
@app.callback(
Output(f"kulfan_{side.lower()}_{i}_output", "children"),
[Input(f"kulfan_{side.lower()}_{i}_input", "drag_value")],
)
def display_slider_value(drag_value):
return f"Parameter: {drag_value}"
def make_table(dataframe):
return dbc.Table.from_dataframe(
dataframe, bordered=True, hover=True, responsive=True, striped=True, style={}
)
last_analyze_timestamp = None
n_clicks_last = 0
### The callback to draw the airfoil on the graph
@app.callback(
Output("display", "figure"),
Output("text_output", "children"),
Output("coordinates_output", "children"),
[
Input("analyze", "n_clicks"),
Input("alpha_slider_input", "value"),
Input("height_slider_input", "value"),
Input("streamline_density_slider_input", "value"),
Input("operating_checklist", "value"),
]
+ [
Input(f"kulfan_{side.lower()}_{i}_input", "value")
for side in sides
for i in range(n_kulfan_inputs_per_side)
],
)
def display_graph(
n_clicks, alpha, height, streamline_density, operating_checklist, *kulfan_inputs
):
### Figure out if a button was pressed
global n_clicks_last
if n_clicks is None:
n_clicks = 0
analyze_button_pressed = n_clicks > n_clicks_last
n_clicks_last = n_clicks
### Parse the checklist
ground_effect = "ground_effect" in operating_checklist
### Start constructing the figure
airfoil = asb.Airfoil(
coordinates=asb.get_kulfan_coordinates(
lower_weights=np.array(kulfan_inputs[n_kulfan_inputs_per_side:]),
upper_weights=np.array(kulfan_inputs[:n_kulfan_inputs_per_side]),
TE_thickness=0,
enforce_continuous_LE_radius=False,
n_points_per_side=200,
)
)
### Do coordinates output
coordinates_output = "\n".join(
["```"]
+ ["AeroSandbox Airfoil"]
+ ["\t%f\t%f" % tuple(coordinate) for coordinate in airfoil.coordinates]
+ ["```"]
)
### Continue doing the airfoil things
airfoil = airfoil.rotate(angle=-np.radians(alpha))
airfoil = airfoil.translate(0, height + 0.5 * np.sind(alpha))
fig = go.Figure()
fig.add_trace(
go.Scatter(
x=airfoil.x(),
y=airfoil.y(),
mode="lines",
name="Airfoil",
fill="toself",
line=dict(color="blue"),
)
)
### Default text output
text_output = 'Click "Analyze" to compute aerodynamics!'
xrng = (-0.5, 1.5)
yrng = (-0.6, 0.6) if not ground_effect else (0, 1.2)
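    # Plot bounds; with ground effect enabled the view starts at y=0 so the
    # ground plane sits at the bottom of the figure.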
if analyze_button_pressed:
analysis = asb.AirfoilInviscid(
airfoil=airfoil.repanel(50),
op_point=asb.OperatingPoint(velocity=1, alpha=0,),
ground_effect=ground_effect,
)
x = np.linspace(*xrng, 100)
y = np.linspace(*yrng, 100)
X, Y = np.meshgrid(x, y)
u, v = analysis.calculate_velocity(x_field=X.flatten(), y_field=Y.flatten())
U = u.reshape(X.shape)
V = v.reshape(Y.shape)
streamline_fig = ff.create_streamline(
x,
y,
U,
V,
arrow_scale=1e-16,
density=streamline_density,
line=dict(color="#ff82a3"),
name="Streamlines",
)
fig = go.Figure(data=streamline_fig.data + fig.data)
text_output = make_table(
pd.DataFrame(
{"Engineering Quantity": ["C_L"], "Value": [f"{analysis.Cl:.3f}"]}
)
)
fig.update_layout(
xaxis_title="x/c",
yaxis_title="y/c",
showlegend=False,
yaxis=dict(scaleanchor="x", scaleratio=1),
margin={"t": 0},
title=None,
)
fig.update_xaxes(range=xrng)
fig.update_yaxes(range=yrng)
return fig, text_output, [coordinates_output]
if __name__ == "__main__":
app.run_server(debug=False)
```
#### File: apps/dash-baseball-statistics/index.py
```python
import os
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
# Dash Bootstrap components
import dash_bootstrap_components as dbc
# Navbar, layouts, custom callbacks
from navbar import Navbar
from layouts import (
appMenu,
menuSlider,
playerMenu,
teamLayout,
battingLayout,
fieldingLayout,
)
import callbacks
# Import app
from app import app
# Import server for deployment
from app import srv as server
app_name = os.getenv("DASH_APP_PATH", "/dash-baseball-statistics")
# Layout variables, navbar, header, content, and container
nav = Navbar()
header = dbc.Row(
dbc.Col(
html.Div(
[
html.H2(children="Major League Baseball History"),
html.H3(children="A Visualization of Historical Data"),
]
)
),
className="banner",
)
content = html.Div([dcc.Location(id="url"), html.Div(id="page-content")])
container = dbc.Container([header, content])
# Menu callback, set and return
# Declare function that connects other pages with content to container
@app.callback(Output("page-content", "children"), [Input("url", "pathname")])
def display_page(pathname):
if pathname in [app_name, app_name + "/"]:
return html.Div(
[
dcc.Markdown(
"""
                    ### The Application
                    This application is a portfolio project built by [<NAME>](https://devparra.github.io/) using Plotly's Dash,
                    faculty.ai's Dash Bootstrap Components, and Pandas. Using historical MLB (Major League Baseball) data,
                    this application provides visualizations for team and player statistics dating from 1903 to 2020. Selecting
                    an era from the dropdown menu updates the list of available teams and players within the range set on the years
                    slider. The slider allows the user to adjust the range of years for which the data is presented.
                    ### The Analysis
                    The application breaks down each baseball team's win/loss performance within a range of the team's history.
                    Additionally, the application breaks down the batting performance with the team batting average, BABIP, and strikeout
                    rate. The application also breaks down the pitching performance using the team's ERA and strikeout-to-walk ratio. Finally, the fielding
                    performance of each team is illustrated with total errors and double plays. The application will also break down
                    each team's player statistics within the given era.
### The Data
The data used in this application was retrieved from [Seanlahman.com](http://www.seanlahman.com/baseball-archive/statistics/).
Provided by [Chadwick Baseball Bureau's GitHub](https://github.com/chadwickbureau/baseballdatabank/) .
This database is copyright 1996-2021 by <NAME>. This data is licensed under a Creative Commons Attribution-ShareAlike
3.0 Unported License. For details see: [CreativeCommons](http://creativecommons.org/licenses/by-sa/3.0/)
"""
)
],
className="home",
)
elif pathname.endswith("/team"):
return appMenu, menuSlider, teamLayout
elif pathname.endswith("/player"):
return appMenu, menuSlider, playerMenu, battingLayout
elif pathname.endswith("/field"):
return appMenu, menuSlider, playerMenu, fieldingLayout
else:
return "ERROR 404: Page not found!"
# Main index function that will call and return all layout variables
def index():
layout = html.Div([nav, container])
return layout
# Set layout to index function
app.layout = index()
# Call app server
if __name__ == "__main__":
# set debug to false when deploying app
app.run_server(debug=True)
```
#### File: apps/dash-brain-viewer/app.py
```python
import os
import json
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_colorscales as dcs
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
from mni import create_mesh_data, default_colorscale
app = dash.Dash(
__name__,
meta_tags=[{"name": "viewport", "content": "width=device-width, initial-scale=1"}],
)
app.title = "Brain Surface Viewer"
server = app.server
GITHUB_LINK = os.environ.get(
"GITHUB_LINK",
"https://github.com/plotly/dash-sample-apps/tree/master/apps/dash-brain-viewer",
)
default_colorscale_index = [ea[1] for ea in default_colorscale]
axis_template = {
"showbackground": True,
"backgroundcolor": "#141414",
"gridcolor": "rgb(255, 255, 255)",
"zerolinecolor": "rgb(255, 255, 255)",
}
plot_layout = {
"title": "",
"margin": {"t": 0, "b": 0, "l": 0, "r": 0},
"font": {"size": 12, "color": "white"},
"showlegend": False,
"plot_bgcolor": "#141414",
"paper_bgcolor": "#141414",
"scene": {
"xaxis": axis_template,
"yaxis": axis_template,
"zaxis": axis_template,
"aspectratio": {"x": 1, "y": 1.2, "z": 1},
"camera": {"eye": {"x": 1.25, "y": 1.25, "z": 1.25}},
"annotations": [],
},
}
app.layout = html.Div(
[
html.Div(
[
html.Div(
[
html.Div(
[
html.Div(
[
html.Img(
src=app.get_asset_url("dash-logo.png")
),
html.H4("MRI Reconstruction"),
],
className="header__title",
),
html.Div(
[
html.P(
"Click on the brain to add an annotation. Drag the black corners of the graph to rotate."
)
],
className="header__info pb-20",
),
html.Div(
[
html.A(
"View on GitHub",
href=GITHUB_LINK,
target="_blank",
)
],
className="header__button",
),
],
className="header pb-20",
),
html.Div(
[
dcc.Graph(
id="brain-graph",
figure={
"data": create_mesh_data("human_atlas"),
"layout": plot_layout,
},
config={"editable": True, "scrollZoom": False},
)
],
className="graph__container",
),
],
className="container",
)
],
className="two-thirds column app__left__section",
),
html.Div(
[
html.Div(
[
html.Div(
[
html.P(
"Click colorscale to change", className="subheader"
),
dcs.DashColorscales(
id="colorscale-picker",
colorscale=default_colorscale_index,
),
]
)
],
className="colorscale pb-20",
),
html.Div(
[
html.P("Select option", className="subheader"),
dcc.RadioItems(
options=[
{"label": "Brain Atlas", "value": "human_atlas"},
{"label": "Cortical Thickness", "value": "human"},
{"label": "Mouse Brain", "value": "mouse"},
],
value="human_atlas",
id="radio-options",
labelClassName="label__option",
inputClassName="input__option",
),
],
className="pb-20",
),
html.Div(
[
html.Span("Click data", className="subheader"),
html.Span(" | "),
html.Span(
"Click on points in the graph.", className="small-text"
),
dcc.Loading(
html.Pre(id="click-data", className="info__container"),
type="dot",
),
],
className="pb-20",
),
html.Div(
[
html.Span("Relayout data", className="subheader"),
html.Span(" | "),
html.Span(
"Drag the graph corners to rotate it.",
className="small-text",
),
dcc.Loading(
html.Pre(id="relayout-data", className="info__container"),
type="dot",
),
],
className="pb-20",
),
html.Div(
[
html.P(
[
"Dash/Python code on ",
html.A(
children="GitHub.",
target="_blank",
href=GITHUB_LINK,
className="red-ish",
),
]
),
html.P(
[
"Brain data from Mcgill's ACE Lab ",
html.A(
children="Surface Viewer.",
target="_blank",
href="https://brainbrowser.cbrain.mcgill.ca/surface-viewer#ct",
className="red-ish",
),
]
),
]
),
],
className="one-third column app__right__section",
),
dcc.Store(id="annotation_storage"),
]
)
def add_marker(x, y, z):
""" Create a plotly marker dict. """
return {
"x": [x],
"y": [y],
"z": [z],
"mode": "markers",
"marker": {"size": 25, "line": {"width": 3}},
"name": "Marker",
"type": "scatter3d",
"text": ["Click point to remove annotation"],
}
def add_annotation(x, y, z):
""" Create plotly annotation dict. """
return {
"x": x,
"y": y,
"z": z,
"font": {"color": "black"},
"bgcolor": "white",
"borderpad": 5,
"bordercolor": "black",
"borderwidth": 1,
"captureevents": True,
"ay": -100,
"arrowcolor": "white",
"arrowwidth": 2,
"arrowhead": 0,
"text": "Click here to annotate<br>(Click point to remove)",
}
def marker_in_points(points, marker):
"""
Checks if the marker is in the list of points.
:params points: a list of dict that contains x, y, z
:params marker: a dict that contains x, y, z
:returns: index of the matching marker in list
"""
for index, point in enumerate(points):
if (
point["x"] == marker["x"]
and point["y"] == marker["y"]
and point["z"] == marker["z"]
):
return index
return None
@app.callback(
Output("brain-graph", "figure"),
[
Input("brain-graph", "clickData"),
Input("radio-options", "value"),
Input("colorscale-picker", "colorscale"),
],
[State("brain-graph", "figure"), State("annotation_storage", "data")],
)
def brain_graph_handler(click_data, val, colorscale, figure, current_anno):
""" Listener on colorscale, option picker, and graph on click to update the graph. """
# new option select
if figure["data"][0]["name"] != val:
figure["data"] = create_mesh_data(val)
figure["layout"] = plot_layout
cs = [[i / (len(colorscale) - 1), rgb] for i, rgb in enumerate(colorscale)]
figure["data"][0]["colorscale"] = cs
return figure
# modify graph markers
if click_data is not None and "points" in click_data:
y_value = click_data["points"][0]["y"]
x_value = click_data["points"][0]["x"]
z_value = click_data["points"][0]["z"]
marker = add_marker(x_value, y_value, z_value)
point_index = marker_in_points(figure["data"], marker)
# delete graph markers
if len(figure["data"]) > 1 and point_index is not None:
figure["data"].pop(point_index)
anno_index_offset = 2 if val == "mouse" else 1
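            # The first figure trace(s) hold the brain mesh itself (presumably two
            # traces for the mouse option), so marker indices in figure["data"] are
            # offset from their matching annotation indices.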
try:
figure["layout"]["scene"]["annotations"].pop(
point_index - anno_index_offset
)
except Exception as error:
print(error)
pass
# append graph markers
else:
# iterate through the store annotations and save it into figure data
if current_anno is not None:
for index, annotations in enumerate(
figure["layout"]["scene"]["annotations"]
):
for key in current_anno.keys():
if str(index) in key:
figure["layout"]["scene"]["annotations"][index][
"text"
] = current_anno[key]
figure["data"].append(marker)
figure["layout"]["scene"]["annotations"].append(
add_annotation(x_value, y_value, z_value)
)
cs = [[i / (len(colorscale) - 1), rgb] for i, rgb in enumerate(colorscale)]
figure["data"][0]["colorscale"] = cs
return figure
@app.callback(Output("click-data", "children"), [Input("brain-graph", "clickData")])
def display_click_data(click_data):
return json.dumps(click_data, indent=4)
@app.callback(
Output("relayout-data", "children"), [Input("brain-graph", "relayoutData")]
)
def display_relayout_data(relayout_data):
return json.dumps(relayout_data, indent=4)
@app.callback(
Output("annotation_storage", "data"),
[Input("brain-graph", "relayoutData")],
[State("annotation_storage", "data")],
)
def save_annotations(relayout_data, current_data):
""" Update the annotations in the dcc store. """
if relayout_data is None:
raise PreventUpdate
if current_data is None:
return {}
for key in relayout_data.keys():
# to determine if the relayout has to do with annotations
if "scene.annotations" in key:
current_data[key] = relayout_data[key]
return current_data
if __name__ == "__main__":
app.run_server(debug=True)
```
#### File: apps/dash-canvas-ocr/app.py
```python
from PIL import Image
import base64
from io import BytesIO
import dash
import numpy as np
import dash_html_components as html
import dash_core_components as dcc
from dash_canvas import DashCanvas
from dash_canvas.utils import array_to_data_url, parse_jsonstring
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
import pytesseract
app = dash.Dash(__name__)
server = app.server
canvas_width = 800
canvas_height = 200
app.layout = html.Div(
[
# Banner
html.Div(
[
html.Img(src=app.get_asset_url("ocr-logo.png"), className="app__logo"),
html.H4("Dash OCR", className="header__text"),
],
className="app__header",
),
# Canvas
html.Div(
[
html.Div(
[
html.P(
"Write inside the canvas with your stylus and press Sign",
className="section_title",
),
html.Div(
DashCanvas(
id="canvas",
lineWidth=8,
width=canvas_width,
height=canvas_height,
hide_buttons=[
"zoom",
"pan",
"line",
"pencil",
"rectangle",
"select",
],
add_only=False,
lineColor="black",
goButtonTitle="Sign",
),
className="canvas-outer",
style={"margin-top": "1em"},
),
],
className="v-card-content",
),
html.Div(
html.Button(id="clear", children="clear"),
className="v-card-content-markdown-outer",
),
html.Div(
[
html.B("Text Recognition Output", className="section_title"),
dcc.Loading(dcc.Markdown(id="text-output", children="")),
],
className="v-card-content",
style={"margin-top": "1em"},
),
],
className="app__content",
),
]
)
@app.callback(Output("canvas", "json_objects"), [Input("clear", "n_clicks")])
def clear_canvas(n):
if n is None:
return dash.no_update
strings = ['{"objects":[ ]}', '{"objects":[]}']
return strings[n % 2]
@app.callback(
Output("text-output", "children"), [Input("canvas", "json_data")],
)
def update_data(string):
if string:
try:
mask = parse_jsonstring(string, shape=(canvas_height, canvas_width))
except:
return "Out of Bounding Box, click clear button and try again"
# np.savetxt('data.csv', mask) use this to save the canvas annotations as a numpy array
# Invert True and False
mask = (~mask.astype(bool)).astype(int)
image_string = array_to_data_url((255 * mask).astype(np.uint8))
# this is from canvas.utils.image_string_to_PILImage(image_string)
img = Image.open(BytesIO(base64.b64decode(image_string[22:])))
text = "{}".format(
pytesseract.image_to_string(img, lang="eng", config="--psm 6")
)
return text
else:
raise PreventUpdate
if __name__ == "__main__":
app.run_server(debug=True)
```
#### File: apps/dash-chess-analytics/app.py
```python
import dash
import ast
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output
import pandas as pd
import numpy as np
import plotly.express as px
import plotly.graph_objs as go
from whitenoise import WhiteNoise
from chessboard import getChessboard, getHeatmap, getStackedBar, getBoard
from styles import *
# Read the .csv file with the preprocessed data.
url = "https://raw.githubusercontent.com/Exileus/DataVis2021_proj2/main/chess_app.csv"
df_original = pd.read_csv(
url,
sep=",",
dtype={"pawns": int, "knights": int, "bishops": int, "rooks": int, "queens": int},
converters={
"wKing_sqr": ast.literal_eval,
"bKing_sqr": ast.literal_eval,
"wQueen_sqr": ast.literal_eval,
"bQueen_sqr": ast.literal_eval,
"wRook_sqr": ast.literal_eval,
"bRook_sqr": ast.literal_eval,
"wRook2_sqr": ast.literal_eval,
"bRook2_sqr": ast.literal_eval,
"wBishop_sqr": ast.literal_eval,
"bBishop_sqr": ast.literal_eval,
"wBishop2_sqr": ast.literal_eval,
"bBishop2_sqr": ast.literal_eval,
"wKnight_sqr": ast.literal_eval,
"bKnight_sqr": ast.literal_eval,
"wKnight2_sqr": ast.literal_eval,
"bKnight2_sqr": ast.literal_eval,
},
)
max_moves = df_original["moves"].max()
min_elo, max_elo = df_original["avg_Elo"].min(), df_original["avg_Elo"].max()
# Define function to output an 8*8 dataframe based on a df and a list of column names to parse.
def board_output(df, col_list):
brd = np.zeros((8, 8))
for col_name in col_list:
for tup in df[col_name]:
if tup == (None, None):
pass
else:
brd[tup] += 1
return pd.DataFrame(brd)
# Define global variables for later.
g_color = "white_color"
g_piece = "King"
g_status, g_winner, g_time_control, g_game_type = ".*", ".*", ".*", ".*"
pieces_list = ["King", "Queen", "Rook", "Bishop", "Knight"]
# Define a dictionary to be used to update the board with the correct columns.
color_piece_dict = cp_dict = {
("white_color", "King"): ["wKing_sqr"],
("black_color", "King"): ["bKing_sqr"],
("white_color", "Queen"): ["wQueen_sqr"],
("black_color", "Queen"): ["bQueen_sqr"],
("white_color", "Rook"): ["wRook_sqr", "wRook2_sqr"],
("black_color", "Rook"): ["bRook_sqr", "bRook2_sqr"],
("white_color", "Bishop"): ["wBishop_sqr", "wBishop2_sqr"],
("black_color", "Bishop"): ["bBishop_sqr", "bBishop2_sqr"],
("white_color", "Knight"): ["wKnight_sqr", "wKnight2_sqr"],
("black_color", "Knight"): ["bKnight_sqr", "bKnight2_sqr"],
}
# Define an additional dict for dropdown status to use for callbacks.
dropdown_status_dict = st_dict = {
"st_all": ".*",
"st_draw": "draw",
"st_mate": "mate",
"st_resign": "resign",
"st_outoftime": "outoftime",
}
dropdown_winner_dict = wn_dict = {
"wn_all": ".*",
"wn_white": "white",
"wn_black": "black",
}
dropdown_time_control_dict = tc_dict = {
"tc_all": ".*",
"tc_bullet": "Bullet",
"tc_blitz": "Blitz",
"tc_classic": "Classical",
"tc_none": "Correspondence",
}
dropdown_game_type_dict = gt_dict = {
"gt_all": ".*",
"gt_std": "game",
"gt_tourney": "tournament",
}
# Set stylesheets and app.
# ["https://codepen.io/chriddyp/pen/bWLwgP.css"]
FA = "https://use.fontawesome.com/releases/v5.12.1/css/all.css"
external_stylesheets = [dbc.themes.LUX, FA]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.title = "CHESS KINGDOM"
server = app.server
server.wsgi_app = WhiteNoise(server.wsgi_app, root="static/")
# Defining app layout
margin_bottom = "30px"
# Banner
banner = dbc.Row(
children=[
dbc.Col(
html.Img(
src=app.get_asset_url("apple-touch-icon.png"),
id="logo",
style={"border-radius": "50%"},
),
width=2,
align="left",
),
dbc.Col(
html.H1("A Visualization of Endgame Chess Pieces"),
align="center",
width=10,
),
],
style={"margin-bottom": "50px", "margin-top": "-30px"},
align="center",
)
# Graph
graph = dbc.Row(
style={"margin-bottom": "30px", "margin-left": "auto", "margin-right": "auto"},
children=[
dcc.Graph(
id="chessboard",
animate=True,
style={
"margin-left": "auto",
"margin-right": "auto",
"background-color": "lightgray",
},
config={
"displayModeBar": False,
"scrollZoom": False,
"showAxisDragHandles": False,
},
)
],
)
# Stacked Bar
stacked_graph = dbc.Row(
style={"margin-bottom": "30px"},
justify="center",
children=[
dbc.Col(
width=10,
children=[
dcc.Graph(
id="stackedbar",
animate=True,
config={
"displayModeBar": False,
"scrollZoom": False,
"showAxisDragHandles": False,
},
)
],
)
],
)
text_margin = "6px"
c_total_games = dbc.Row(
style={"margin-bottom": "20px"},
justify="center",
children=[
dbc.Col(
children=[
html.Div(id="game_count", style={"text-align": "center"}),
html.Div(
"TOTAL GAMES",
style={"margin-left": text_margin, "text-align": "center"},
),
],
),
dbc.Col(
children=[
html.Div(id="white_wins", style={"text-align": "center"}),
html.Div(
"WINS BY WHITE",
style={"margin-left": text_margin, "text-align": "center"},
),
],
),
dbc.Col(
children=[
html.Div(id="black_wins", style={"text-align": "center"}),
html.Div(
"WINS BY BLACK",
style={"margin-left": text_margin, "text-align": "center"},
),
],
),
dbc.Col(
children=[
html.Div(id="draw", style={"text-align": "center"}),
html.Div(
"DRAWS", style={"margin-left": text_margin, "text-align": "center"}
),
],
),
],
)
# BLACK / WHITE
c_choose_side = dbc.Col(
style={"margin-bottom": margin_bottom},
children=[
html.Div(
str("Choose side").upper(),
style={"text-align": "center", "margin-bottom": text_margin},
),
dbc.Row(
justify="center",
children=[
dbc.ButtonGroup(
style={"text-align": "center"},
children=[
dbc.Button(
"White",
color="secondary",
n_clicks=0,
id="white_color",
outline=True,
active=True,
),
dbc.Button(
"Black",
color="dark",
n_clicks=0,
id="black_color",
outline=True,
active=False,
),
],
),
],
),
],
)
c_select_piece = dbc.Col(
style={"margin-bottom": margin_bottom},
width=9,
children=[
html.Div(
str("Select Piece").upper(),
style={"text-align": "center", "margin-bottom": text_margin},
),
dbc.Row(
justify="center",
children=[
dbc.ButtonGroup(
children=[
dbc.Button(
[
html.I(className=f"fas fa-chess-{name.lower()} mr-2"),
name,
],
color="primary",
n_clicks=0,
outline=True,
id=name,
active=False,
)
for name in pieces_list
],
)
],
),
],
)
c_elo_slider = dbc.Col(
style={
"margin-bottom": margin_bottom,
"margin-left": "auto",
"margin-right": "auto",
},
width=12,
children=[
html.Div(
str("Elo range").upper(),
style={"text-align": "center", "margin-bottom": text_margin},
),
dcc.RangeSlider(
id="elo_slider",
min=min_elo,
max=max_elo,
value=[min_elo, max_elo],
step=10,
pushable=1,
allowCross=False,
marks={
i: str(i)
for i in range(
int(min_elo) - 1,
int(max_elo) + 1,
int((max_elo - min_elo + 2) // 10),
)
},
),
],
)
c_moves_slider = dbc.Col(
style={
"margin-bottom": margin_bottom,
"margin-left": "auto",
"margin-right": "auto",
},
width=12,
children=[
html.Div(
str("Number of Moves").upper(),
style={"text-align": "center", "margin-bottom": text_margin},
),
dcc.RangeSlider(
id="moves_slider",
min=1,
max=max_moves,
value=[0, max_moves],
step=1,
pushable=1,
allowCross=False,
marks={i: str(i) for i in range(0, max_moves, 5)},
),
],
)
dropdown_status = dbc.DropdownMenu(
[
dbc.DropdownMenuItem(str("Status",).upper(), header=True,),
dbc.DropdownMenuItem(str("All").upper(), id="st_all", n_clicks=0),
dbc.DropdownMenuItem(str("Draws").upper(), id="st_draw", n_clicks=0),
dbc.DropdownMenuItem(str("Checkmate").upper(), id="st_mate", n_clicks=0),
dbc.DropdownMenuItem(str("Resignation").upper(), id="st_resign", n_clicks=0),
dbc.DropdownMenuItem(
str("Time Forfeit").upper(), id="st_outoftime", n_clicks=0
),
],
label="Status",
id="dropdown_status",
)
dropdown_winner = dbc.Collapse(
dbc.DropdownMenu(
[
dbc.DropdownMenuItem(str("Winning Side").upper(), header=True),
dbc.DropdownMenuItem(str("All").upper(), id="wn_all", n_clicks=0),
dbc.DropdownMenuItem(str("White").upper(), id="wn_white", n_clicks=0),
dbc.DropdownMenuItem(str("Black").upper(), id="wn_black", n_clicks=0),
],
label="Winning Side",
id="dropdown_winner",
),
id="wn_menu",
)
dropdown_time_control = dbc.DropdownMenu(
[
dbc.DropdownMenuItem(str("Time Control").upper(), header=True),
dbc.DropdownMenuItem(str("All").upper(), id="tc_all", n_clicks=0),
dbc.DropdownMenuItem(str("Bullet").upper(), id="tc_bullet", n_clicks=0),
dbc.DropdownMenuItem(str("Blitz").upper(), id="tc_blitz", n_clicks=0),
# dbc.DropdownMenuItem("Rapid",id="tc_rpd",n_clicks=0), if this shows up later then include it.
dbc.DropdownMenuItem(str("Classical").upper(), id="tc_classic", n_clicks=0),
dbc.DropdownMenuItem(str("No Time Control").upper(), id="tc_none", n_clicks=0),
],
label="Time Control",
id="dropdown_time_control",
)
dropdown_game_type = dbc.DropdownMenu(
[
dbc.DropdownMenuItem(str("Game Type").upper(), header=True),
dbc.DropdownMenuItem(str("All").upper(), id="gt_all", n_clicks=0),
dbc.DropdownMenuItem(str("Standard").upper(), id="gt_std", n_clicks=0),
dbc.DropdownMenuItem(str("Tournament").upper(), id="gt_tourney", n_clicks=0),
],
label="Game Type",
id="dropdown_game_type",
)
dropdown_states = dbc.Row(
justify="center",
children=[
html.Tbody("xsxsxs", id="g_status", style={"margin": "10px"}),
html.Tbody("xsxsxs", id="g_winner", style={"margin": "10px"}),
html.Tbody("xsxsxs", id="g_time_control", style={"margin": "10px"}),
html.Tbody(children="111", id="g_game_type", style={"margin": "10px"}),
],
)
popover_status = dbc.Popover(
[
dbc.PopoverHeader("Status of the Game"),
dbc.PopoverBody(
"Games can be over in a myriad of ways, either by checkmate, draw, player resignation, or when a player runs out of time. Filter the games by these conditions here."
),
],
trigger="hover",
target="dropdown_status",
placement="left",
)
popover_time_control = dbc.Popover(
[
dbc.PopoverHeader("Time Control Filter"),
dbc.PopoverBody(
"Players have a specific time to make their moves. The games in the dataset follow this convention: Bullet Games (0-3 minutes), Blitz(3-10 minutes), Classical(10 minutes+). Note: Lichess uses a slight different system today."
),
],
trigger="hover",
target="dropdown_time_control",
placement="left",
)
popover_game_type = dbc.Popover(
[
dbc.PopoverHeader("Type of Competitive Setting"),
dbc.PopoverBody(
"This dataset contains games played in specific tournaments, hosted by Lichess."
),
],
trigger="hover",
target="dropdown_game_type",
placement="left",
)
about_this = dbc.Row(
justify="end",
children=[
dbc.Button(str("About this Visualization").upper(), id="abt_us"),
dbc.Popover(
[
dbc.PopoverHeader("Powered by Lichess"),
dbc.PopoverBody(
"""This visualization is powered by a dataset of games played in April, 2017, sourced from the publicly available lichess database.\n
Authors: <NAME> 20200604, <NAME> 20200994, <NAME> 20200613.\nNova IMS, Data Visualization Course, 2021."""
),
],
trigger="click",
target="abt_us",
),
],
)
dropdown_menus = dbc.Row(
style={"margin-bottom": margin_bottom},
justify="center",
children=[
dropdown_status,
popover_status,
dropdown_winner,
dropdown_time_control,
popover_time_control,
dropdown_game_type,
popover_game_type,
],
)
app.layout = dbc.Jumbotron(
style={"background-color": "#ebebeb"}, # ADD SETTINGS HERE
children=[
# Banner
# Main Layout
dbc.Row( # ADD SETTINGS HERE
children=[
# PARAMETER SETTINGS COLUMN
dbc.Col(
children=[
banner,
c_total_games,
stacked_graph,
dbc.Row(
style={"margin-bottom": margin_bottom},
children=[c_choose_side, c_select_piece],
),
c_elo_slider,
c_moves_slider,
dropdown_menus,
dropdown_states,
]
),
# CHESS BOARD COLUMN
dbc.Col(width={"size": 6}, children=[graph, about_this]),
],
),
],
)
@app.callback(
Output("chessboard", "figure"),
Output("stackedbar", "figure"),
Output("game_count", "children"),
Output("white_wins", "children"),
Output("black_wins", "children"),
Output("draw", "children"),
Output("wn_menu", "is_open"),
Output("white_color", "active"),
Output("black_color", "active"),
Output("King", "active"),
Output("Queen", "active"),
Output("Rook", "active"),
Output("Bishop", "active"),
Output("Knight", "active"),
Output("moves_slider", "value"),
Output("g_status", "children"),
Output("g_winner", "children"),
Output("g_time_control", "children"),
Output("g_game_type", "children"),
[
Input("white_color", "n_clicks"),
Input("black_color", "n_clicks"),
Input("King", "n_clicks"),
Input("Queen", "n_clicks"),
Input("Rook", "n_clicks"),
Input("Bishop", "n_clicks"),
Input("Knight", "n_clicks"),
Input("elo_slider", "value"),
Input("st_all", "n_clicks"),
Input("st_draw", "n_clicks"),
Input("st_mate", "n_clicks"),
Input("st_resign", "n_clicks"),
Input("st_outoftime", "n_clicks"),
Input("wn_all", "n_clicks"),
Input("wn_white", "n_clicks"),
Input("wn_black", "n_clicks"),
Input("tc_all", "n_clicks"),
Input("tc_blitz", "n_clicks"),
Input("tc_bullet", "n_clicks"),
Input("tc_classic", "n_clicks"),
Input("tc_none", "n_clicks"),
Input("gt_all", "n_clicks"),
Input("gt_std", "n_clicks"),
Input("gt_tourney", "n_clicks"),
Input("moves_slider", "value"),
],
)
def update_chessboard(
white_color,
black_color,
King,
Queen,
Rook,
Bishop,
Knight,
elo_range,
st_all,
st_draw,
st_mate,
st_resign,
st_outoftime,
wn_all,
wn_white,
wn_black,
tc_all,
tc_blitz,
tc_bullet,
tc_classic,
tc_none,
gt_all,
gt_std,
gt_tourney,
move_range,
):
# Trigger button here, for when a button is pressed.
trigger_button = dash.callback_context.triggered[0]["prop_id"].split(".")[0]
global g_status
global g_winner
global g_time_control
global g_game_type
if trigger_button in st_dict.keys():
g_status = st_dict[trigger_button]
elif trigger_button in wn_dict.keys():
g_winner = wn_dict[trigger_button]
elif trigger_button in tc_dict.keys():
g_time_control = tc_dict[trigger_button]
elif trigger_button in gt_dict.keys():
g_game_type = gt_dict[trigger_button]
# Filters go here.
dff = df_original[
(df_original["avg_Elo"] >= int(elo_range[0]))
& (df_original["avg_Elo"] <= int(elo_range[1]))
& (df_original["moves"] >= int(move_range[0]))
& (df_original["moves"] <= int(move_range[-1]))
& (df_original["victory_status"].str.contains(g_status))
& (df_original["Winner"].str.contains(g_winner))
& (df_original["Event"].str.contains(g_time_control))
& (df_original["Event"].str.contains(g_game_type))
]
if dff.shape[0] == 0:
return dash.no_update
min_moves_, max_moves_ = dff["moves"].min(), dff["moves"].max()
# print(f"{min_moves_ = }, {max_moves_ = }")
value_ = [min_moves_, max_moves_]
# Before further manipulation, get the number of games from the filtered dataframe.
game_count = dff.shape[0]
game_results = dff.Winner.value_counts().to_dict()
game_results_norm = np.round(
dff.Winner.str.upper().value_counts(normalize=True), 4
).to_dict()
if "white" in game_results.keys():
white_wins = game_results["white"]
else:
white_wins = 0
if "black" in game_results.keys():
black_wins = game_results["black"]
else:
black_wins = 0
if "draw" in game_results.keys():
draw = game_results["draw"]
else:
draw = 0
print(game_results_norm)
stackedbar = getStackedBar(game_results_norm)
# Then retrieve the column of interest.
global g_color
global g_piece
if trigger_button in ["white_color", "black_color"]:
g_color = trigger_button
if trigger_button in pieces_list:
g_piece = trigger_button
df = board_output(dff, cp_dict[g_color, g_piece])
# Additionally:
if g_status == "draw":
is_open = False
else:
is_open = True
# Additionaly pt.2:
if g_color == "white_color":
wc_act, bc_act = True, False
else:
wc_act, bc_act = False, True
# Additionaly pt3:
k_act, q_act, r_act, b_act, n_act = [x == g_piece for x in pieces_list]
# Transform it for the heatmap.
df = (
df.stack()
.reset_index()
.rename(columns={"level_0": "rows", "level_1": "cols", 0: "freq"})
)
df["rows"] = df["rows"].replace({i: list(range(8))[::-1][i] for i in range(8)})
chessboard = getChessboard(800)
getBoard(chessboard)
chessboard.add_trace(getHeatmap(dataframe=df))
# print(
# f"{g_color = }, {g_game_type = }, {g_piece = }, {g_status = }, {g_time_control = }, {g_winner = }"
# )
g_status_ = {
".*": "Status: all",
"draw": "Status: draw",
"mate": "Status: checkmate",
"resign": "Status: resignation",
"outoftime": "Status: time forfeit",
}[g_status]
g_winner_ = {
".*": "winner: All",
"white": "winner: white",
"black": "winner: black",
}[g_winner]
g_time_control_ = {
".*": "time control: all",
"Bullet": "time control: Bullet",
"Blitz": "time control: Blitz",
"Classical": "time control: Classical",
"Correspondence": "time control: No Time Control",
}[g_time_control]
g_game_type_ = {
".*": "game type: all",
"game": "game type: standard",
"tournament": "game type: tournament",
}[g_game_type]
return (
chessboard,
stackedbar,
game_count,
white_wins,
black_wins,
draw,
is_open,
wc_act,
bc_act,
k_act,
q_act,
r_act,
b_act,
n_act,
value_,
g_status_.upper(),
g_winner_.upper(),
g_time_control_.upper(),
g_game_type_.upper(),
)
# Starting the Dash app
if __name__ == "__main__":
app.run_server(debug=True)
```
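A quick aside on the board reshape in `update_chessboard`: the 8×8 frequency table returned by `board_output` is flattened into a long `rows / cols / freq` frame before it is drawn as a heatmap. A minimal, self-contained sketch of that reshape (the random board below is only an illustration, not the app's data):

```python
import numpy as np
import pandas as pd

# Stand-in for board_output()'s result: an 8x8 table of per-square frequencies.
board = pd.DataFrame(np.random.randint(0, 50, size=(8, 8)), columns=list("abcdefgh"))

# Wide -> long: one row per square, matching the column names the heatmap expects.
long_df = (
    board.stack()
    .reset_index()
    .rename(columns={"level_0": "rows", "level_1": "cols", 0: "freq"})
)

# Flip the row index so rank 8 is drawn at the top of the board, as in the app.
long_df["rows"] = long_df["rows"].replace({i: list(range(8))[::-1][i] for i in range(8)})
print(long_df.head())
```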
#### File: apps/dash-daq-satellite-dashboard/app.py
```python
import time
import pathlib
import os
import pandas as pd
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import State, Input, Output
import dash_daq as daq
app = dash.Dash(
__name__,
meta_tags=[
{"name": "viewport", "content": "width=device-width, initial-scale=1.0"}
],
)
# This is for gunicorn
server = app.server
# Mapbox
MAPBOX_ACCESS_TOKEN = "<KEY>"
MAPBOX_STYLE = "mapbox://styles/plotlymapbox/cjyivwt3i014a1dpejm5r7dwr"
# Dash_DAQ elements
utc = html.Div(
id="control-panel-utc",
children=[
daq.LEDDisplay(
id="control-panel-utc-component",
value="16:23",
label="Time",
size=40,
color="#fec036",
backgroundColor="#2b2b2b",
)
],
n_clicks=0,
)
speed = html.Div(
id="control-panel-speed",
children=[
daq.Gauge(
id="control-panel-speed-component",
label="Speed",
min=0,
max=40,
showCurrentValue=True,
value=27.859,
size=175,
units="1000km/h",
color="#fec036",
)
],
n_clicks=0,
)
elevation = html.Div(
id="control-panel-elevation",
children=[
daq.Tank(
id="control-panel-elevation-component",
label="Elevation",
min=0,
max=1000,
value=650,
units="kilometers",
showCurrentValue=True,
color="#303030",
)
],
n_clicks=0,
)
temperature = html.Div(
id="control-panel-temperature",
children=[
daq.Tank(
id="control-panel-temperature-component",
label="Temperature",
min=0,
max=500,
value=290,
units="Kelvin",
showCurrentValue=True,
color="#303030",
)
],
n_clicks=0,
)
fuel_indicator = html.Div(
id="control-panel-fuel",
children=[
daq.GraduatedBar(
id="control-panel-fuel-component",
label="Fuel Level",
min=0,
max=100,
value=76,
step=1,
showCurrentValue=True,
color="#fec036",
)
],
n_clicks=0,
)
battery_indicator = html.Div(
id="control-panel-battery",
children=[
daq.GraduatedBar(
id="control-panel-battery-component",
label="Battery-Level",
min=0,
max=100,
value=85,
step=1,
showCurrentValue=True,
color="#fec036",
)
],
n_clicks=0,
)
longitude = html.Div(
id="control-panel-longitude",
children=[
daq.LEDDisplay(
id="control-panel-longitude-component",
value="0000.0000",
label="Longitude",
size=24,
color="#fec036",
style={"color": "#black"},
backgroundColor="#2b2b2b",
)
],
n_clicks=0,
)
latitude = html.Div(
id="control-panel-latitude",
children=[
daq.LEDDisplay(
id="control-panel-latitude-component",
value="0050.9789",
label="Latitude",
size=24,
color="#fec036",
style={"color": "#black"},
backgroundColor="#2b2b2b",
)
],
n_clicks=0,
)
solar_panel_0 = daq.Indicator(
className="panel-lower-indicator",
id="control-panel-solar-panel-0",
label="Solar-Panel-0",
labelPosition="bottom",
value=True,
color="#fec036",
style={"color": "#black"},
)
solar_panel_1 = daq.Indicator(
className="panel-lower-indicator",
id="control-panel-solar-panel-1",
label="Solar-Panel-1",
labelPosition="bottom",
value=True,
color="#fec036",
style={"color": "#black"},
)
camera = daq.Indicator(
className="panel-lower-indicator",
id="control-panel-camera",
label="Camera",
labelPosition="bottom",
value=True,
color="#fec036",
style={"color": "#black"},
)
thrusters = daq.Indicator(
className="panel-lower-indicator",
id="control-panel-thrusters",
label="Thrusters",
labelPosition="bottom",
value=True,
color="#fec036",
style={"color": "#black"},
)
motor = daq.Indicator(
className="panel-lower-indicator",
id="control-panel-motor",
label="Motor",
labelPosition="bottom",
value=True,
color="#fec036",
style={"color": "#black"},
)
communication_signal = daq.Indicator(
className="panel-lower-indicator",
id="control-panel-communication-signal",
label="Signal",
labelPosition="bottom",
value=True,
color="#fec036",
style={"color": "#black"},
)
map_toggle = daq.ToggleSwitch(
id="control-panel-toggle-map",
value=True,
label=["Hide path", "Show path"],
color="#ffe102",
style={"color": "#black"},
)
minute_toggle = daq.ToggleSwitch(
id="control-panel-toggle-minute",
value=True,
label=["Past Hour", "Past Minute"],
color="#ffe102",
style={"color": "#black"},
)
# Side panel
satellite_dropdown = dcc.Dropdown(
id="satellite-dropdown-component",
options=[
{"label": "H45-K1", "value": "h45-k1"},
{"label": "L12-5", "value": "l12-5"},
],
clearable=False,
value="h45-k1",
)
satellite_dropdown_text = html.P(
id="satellite-dropdown-text", children=["Satellite", html.Br(), " Dashboard"]
)
satellite_title = html.H1(id="satellite-name", children="")
satellite_body = html.P(
className="satellite-description", id="satellite-description", children=[""]
)
side_panel_layout = html.Div(
id="panel-side",
children=[
satellite_dropdown_text,
html.Div(id="satellite-dropdown", children=satellite_dropdown),
html.Div(id="panel-side-text", children=[satellite_title, satellite_body]),
],
)
# Satellite location tracker
# Helper to straighten lines on the map
def flatten_path(xy1, xy2):
diff_rate = (xy2 - xy1) / 100
res_list = []
for i in range(100):
res_list.append(xy1 + i * diff_rate)
return res_list
map_data = [
{
"type": "scattermapbox",
"lat": [0],
"lon": [0],
"hoverinfo": "text+lon+lat",
"text": "Satellite Path",
"mode": "lines",
"line": {"width": 2, "color": "#707070"},
},
{
"type": "scattermapbox",
"lat": [0],
"lon": [0],
"hoverinfo": "text+lon+lat",
"text": "Current Position",
"mode": "markers",
"marker": {"size": 10, "color": "#fec036"},
},
]
map_layout = {
"mapbox": {
"accesstoken": MAPBOX_ACCESS_TOKEN,
"style": MAPBOX_STYLE,
"center": {"lat": 45},
},
"showlegend": False,
"autosize": True,
"paper_bgcolor": "#1e1e1e",
"plot_bgcolor": "#1e1e1e",
"margin": {"t": 0, "r": 0, "b": 0, "l": 0},
}
map_graph = html.Div(
id="world-map-wrapper",
children=[
map_toggle,
dcc.Graph(
id="world-map",
figure={"data": map_data, "layout": map_layout},
config={"displayModeBar": False, "scrollZoom": False},
),
],
)
# Histogram
histogram = html.Div(
id="histogram-container",
children=[
html.Div(
id="histogram-header",
children=[
html.H1(
id="histogram-title", children=["Select A Property To Display"]
),
minute_toggle,
],
),
dcc.Graph(
id="histogram-graph",
figure={
"data": [
{
"x": [i for i in range(60)],
"y": [i for i in range(60)],
"type": "scatter",
"marker": {"color": "#fec036"},
}
],
"layout": {
"margin": {"t": 30, "r": 35, "b": 40, "l": 50},
"xaxis": {"dtick": 5, "gridcolor": "#636363", "showline": False},
"yaxis": {"showgrid": False},
"plot_bgcolor": "#2b2b2b",
"paper_bgcolor": "#2b2b2b",
"font": {"color": "gray"},
},
},
config={"displayModeBar": False},
),
],
)
# Control panel + map
main_panel_layout = html.Div(
id="panel-upper-lower",
children=[
dcc.Interval(id="interval", interval=1 * 2000, n_intervals=0),
map_graph,
html.Div(
id="panel",
children=[
histogram,
html.Div(
id="panel-lower",
children=[
html.Div(
id="panel-lower-0",
children=[elevation, temperature, speed, utc],
),
html.Div(
id="panel-lower-1",
children=[
html.Div(
id="panel-lower-led-displays",
children=[latitude, longitude],
),
html.Div(
id="panel-lower-indicators",
children=[
html.Div(
id="panel-lower-indicators-0",
children=[solar_panel_0, thrusters],
),
html.Div(
id="panel-lower-indicators-1",
children=[solar_panel_1, motor],
),
html.Div(
id="panel-lower-indicators-2",
children=[camera, communication_signal],
),
],
),
html.Div(
id="panel-lower-graduated-bars",
children=[fuel_indicator, battery_indicator],
),
],
),
],
),
],
),
],
)
# Data generation
# Pandas
APP_PATH = str(pathlib.Path(__file__).parent.resolve())
# Satellite H45-K1 data
df_non_gps_h_0 = pd.read_csv(
os.path.join(APP_PATH, os.path.join("data", "non_gps_data_h_0.csv"))
)
df_non_gps_m_0 = pd.read_csv(
os.path.join(APP_PATH, os.path.join("data", "non_gps_data_m_0.csv"))
)
df_gps_m_0 = pd.read_csv(
os.path.join(APP_PATH, os.path.join("data", "gps_data_m_0.csv"))
)
df_gps_h_0 = pd.read_csv(
os.path.join(APP_PATH, os.path.join("data", "gps_data_h_0.csv"))
)
# Satellite L12-5 data
df_non_gps_h_1 = pd.read_csv(
os.path.join(APP_PATH, os.path.join("data", "non_gps_data_h_1.csv"))
)
df_non_gps_m_1 = pd.read_csv(
os.path.join(APP_PATH, os.path.join("data", "non_gps_data_m_1.csv"))
)
df_gps_m_1 = pd.read_csv(
os.path.join(APP_PATH, os.path.join("data", "gps_data_m_1.csv"))
)
df_gps_h_1 = pd.read_csv(
os.path.join(APP_PATH, os.path.join("data", "gps_data_h_1.csv"))
)
# Root
root_layout = html.Div(
id="root",
children=[
dcc.Store(id="store-placeholder"),
dcc.Store(
id="store-data",
data={
"hour_data_0": {
"elevation": [df_non_gps_h_0["elevation"][i] for i in range(60)],
"temperature": [
df_non_gps_h_0["temperature"][i] for i in range(60)
],
"speed": [df_non_gps_h_0["speed"][i] for i in range(60)],
"latitude": [
"{0:09.4f}".format(df_gps_h_0["lat"][i]) for i in range(60)
],
"longitude": [
"{0:09.4f}".format(df_gps_h_0["lon"][i]) for i in range(60)
],
"fuel": [df_non_gps_h_0["fuel"][i] for i in range(60)],
"battery": [df_non_gps_h_0["battery"][i] for i in range(60)],
},
"minute_data_0": {
"elevation": [df_non_gps_m_0["elevation"][i] for i in range(60)],
"temperature": [
df_non_gps_m_0["temperature"][i] for i in range(60)
],
"speed": [df_non_gps_m_0["speed"][i] for i in range(60)],
"latitude": [
"{0:09.4f}".format(df_gps_m_0["lat"][i]) for i in range(60)
],
"longitude": [
"{0:09.4f}".format(df_gps_m_0["lon"][i]) for i in range(60)
],
"fuel": [df_non_gps_m_0["fuel"][i] for i in range(60)],
"battery": [df_non_gps_m_0["battery"][i] for i in range(60)],
},
"hour_data_1": {
"elevation": [df_non_gps_h_1["elevation"][i] for i in range(60)],
"temperature": [
df_non_gps_h_1["temperature"][i] for i in range(60)
],
"speed": [df_non_gps_h_1["speed"][i] for i in range(60)],
"latitude": [
"{0:09.4f}".format(df_gps_h_1["lat"][i]) for i in range(60)
],
"longitude": [
"{0:09.4f}".format(df_gps_h_1["lon"][i]) for i in range(60)
],
"fuel": [df_non_gps_h_1["fuel"][i] for i in range(60)],
"battery": [df_non_gps_h_1["battery"][i] for i in range(60)],
},
"minute_data_1": {
"elevation": [df_non_gps_m_1["elevation"][i] for i in range(60)],
"temperature": [
df_non_gps_m_1["temperature"][i] for i in range(60)
],
"speed": [df_non_gps_m_1["speed"][i] for i in range(60)],
"latitude": [
"{0:09.4f}".format(df_gps_m_1["lat"][i]) for i in range(60)
],
"longitude": [
"{0:09.4f}".format(df_gps_m_1["lon"][i]) for i in range(60)
],
"fuel": [df_non_gps_m_1["fuel"][i] for i in range(60)],
"battery": [df_non_gps_m_1["battery"][i] for i in range(60)],
},
},
),
# For the case no components were clicked, we need to know what type of graph to preserve
dcc.Store(id="store-data-config", data={"info_type": "", "satellite_type": 0}),
side_panel_layout,
main_panel_layout,
],
)
app.layout = root_layout
# Callbacks Data
# Add new data every second/minute
@app.callback(
Output("store-data", "data"),
[Input("interval", "n_intervals")],
[State("store-data", "data")],
)
def update_data(interval, data):
new_data = data
# Update H45-K1 data when sat==0, update L12-5 data when sat==1
for sat in range(2):
if sat == 0:
gps_minute_file = df_gps_m_0
gps_hour_file = df_gps_h_0
else:
gps_minute_file = df_gps_m_1
gps_hour_file = df_gps_h_1
m_data_key = "minute_data_" + str(sat)
h_data_key = "hour_data_" + str(sat)
new_data[m_data_key]["elevation"].append(data[m_data_key]["elevation"][0])
new_data[m_data_key]["elevation"] = new_data[m_data_key]["elevation"][1:61]
new_data[m_data_key]["temperature"].append(data[m_data_key]["temperature"][0])
new_data[m_data_key]["temperature"] = new_data[m_data_key]["temperature"][1:61]
new_data[m_data_key]["speed"].append(data[m_data_key]["speed"][0])
new_data[m_data_key]["speed"] = new_data[m_data_key]["speed"][1:61]
new_data[m_data_key]["latitude"].append(
"{0:09.4f}".format(gps_minute_file["lat"][(60 + interval) % 3600])
)
new_data[m_data_key]["latitude"] = new_data[m_data_key]["latitude"][1:61]
new_data[m_data_key]["longitude"].append(
"{0:09.4f}".format(gps_minute_file["lon"][(60 + interval) % 3600])
)
new_data[m_data_key]["longitude"] = new_data[m_data_key]["longitude"][1:61]
new_data[m_data_key]["fuel"].append(data[m_data_key]["fuel"][0])
new_data[m_data_key]["fuel"] = new_data[m_data_key]["fuel"][1:61]
new_data[m_data_key]["battery"].append(data[m_data_key]["battery"][0])
new_data[m_data_key]["battery"] = new_data["minute_data_0"]["battery"][1:61]
if interval % 60000 == 0:
new_data[h_data_key]["elevation"].append(data[h_data_key]["elevation"][0])
new_data[h_data_key]["elevation"] = new_data[h_data_key]["elevation"][1:61]
new_data[h_data_key]["temperature"].append(
data[h_data_key]["temperature"][0]
)
new_data[h_data_key]["temperature"] = new_data[h_data_key]["temperature"][
1:61
]
new_data[h_data_key]["speed"].append(data[h_data_key]["speed"][0])
new_data[h_data_key]["speed"] = new_data[h_data_key]["speed"][1:61]
new_data[h_data_key]["latitude"].append(
"{0:09.4f}".format(gps_hour_file["lat"][interval % 60])
)
new_data[h_data_key]["latitude"] = new_data[h_data_key]["latitude"][1:61]
new_data[h_data_key]["longitude"].append(
"{0:09.4f}".format(gps_hour_file["lon"][interval % 60])
)
new_data[h_data_key]["longitude"] = new_data[h_data_key]["longitude"][1:61]
new_data[h_data_key]["fuel"].append(data[h_data_key]["fuel"][0])
new_data[h_data_key]["fuel"] = new_data[h_data_key]["fuel"][1:61]
new_data[h_data_key]["battery"].append(data[h_data_key]["battery"][0])
new_data[h_data_key]["battery"] = new_data[h_data_key]["battery"]
return new_data
# Callbacks Histogram
# Update the graph
@app.callback(
[
Output("histogram-graph", "figure"),
Output("store-data-config", "data"),
Output("histogram-title", "children"),
],
[
Input("interval", "n_intervals"),
Input("satellite-dropdown-component", "value"),
Input("control-panel-toggle-minute", "value"),
Input("control-panel-elevation", "n_clicks"),
Input("control-panel-temperature", "n_clicks"),
Input("control-panel-speed", "n_clicks"),
Input("control-panel-latitude", "n_clicks"),
Input("control-panel-longitude", "n_clicks"),
Input("control-panel-fuel", "n_clicks"),
Input("control-panel-battery", "n_clicks"),
],
[
State("store-data", "data"),
State("store-data-config", "data"),
State("histogram-graph", "figure"),
State("store-data-config", "data"),
State("histogram-title", "children"),
],
)
def update_graph(
interval,
satellite_type,
minute_mode,
elevation_n_clicks,
temperature_n_clicks,
speed_n_clicks,
latitude_n_clicks,
longitude_n_clicks,
fuel_n_clicks,
battery_n_clicks,
data,
data_config,
old_figure,
old_data,
old_title,
):
new_data_config = data_config
info_type = data_config["info_type"]
ctx = dash.callback_context
# Check which input fired off the component
if not ctx.triggered:
trigger_input = ""
else:
trigger_input = ctx.triggered[0]["prop_id"].split(".")[0]
# Update store-data-config['satellite_type']
if satellite_type == "h45-k1":
new_data_config["satellite_type"] = 0
elif satellite_type == "l12-5":
new_data_config["satellite_type"] = 1
else:
new_data_config["satellite_type"] = None
# Decide the range of Y given if minute_mode is on
def set_y_range(data_key):
if data_key == "elevation":
if minute_mode:
figure["layout"]["yaxis"] = {"rangemode": "normal", "autorange": True}
else:
figure["layout"]["yaxis"] = {
"rangemode": "normal",
"range": [0, 1000],
"autorange": False,
}
elif data_key == "temperature":
if minute_mode:
figure["layout"]["yaxis"] = {"rangemode": "normal", "autorange": True}
else:
figure["layout"]["yaxis"] = {
"rangemode": "normal",
"range": [0, 500],
"autorange": False,
}
elif data_key == "speed":
if minute_mode:
figure["layout"]["yaxis"] = {"rangemode": "normal", "autorange": True}
else:
figure["layout"]["yaxis"] = {
"rangemode": "normal",
"range": [0, 40],
"autorange": False,
}
elif data_key == "latitude":
if minute_mode:
figure["layout"]["yaxis"] = {"rangemode": "normal", "autorange": True}
else:
figure["layout"]["yaxis"] = {
"rangemode": "normal",
"range": [-90, 90],
"autorange": False,
"dtick": 30,
}
elif data_key == "longitude":
if minute_mode:
figure["layout"]["yaxis"] = {"rangemode": "normal", "autorange": True}
else:
figure["layout"]["yaxis"] = {
"rangemode": "normal",
"range": [0, 360],
"autorange": False,
}
elif data_key == "fuel" or data_key == "battery":
if minute_mode:
figure["layout"]["yaxis"] = {"rangemode": "normal", "autorange": True}
else:
figure["layout"]["yaxis"] = {
"rangemode": "normal",
"range": [0, 100],
"autorange": False,
}
# Function to update values
def update_graph_data(data_key):
string_buffer = ""
if data_config["satellite_type"] == 0:
string_buffer = "_0"
elif data_config["satellite_type"] == 1:
string_buffer = "_1"
if minute_mode:
figure["data"][0]["y"] = list(
reversed(data["minute_data" + string_buffer][data_key])
)
else:
figure["data"][0]["y"] = list(
reversed(data["hour_data" + string_buffer][data_key])
)
# Graph title changes depending on graphed data
new_title = data_key.capitalize() + " Histogram"
return [data_key, new_title]
# A default figure option to base off everything else from
figure = old_figure
# First pass checks if a component has been selected
if trigger_input == "control-panel-elevation":
set_y_range("elevation")
info_type, new_title = update_graph_data("elevation")
elif trigger_input == "control-panel-temperature":
set_y_range("temperature")
info_type, new_title = update_graph_data("temperature")
elif trigger_input == "control-panel-speed":
set_y_range("speed")
info_type, new_title = update_graph_data("speed")
elif trigger_input == "control-panel-latitude":
set_y_range("latitude")
info_type, new_title = update_graph_data("latitude")
elif trigger_input == "control-panel-longitude":
set_y_range("longitude")
info_type, new_title = update_graph_data("longitude")
elif trigger_input == "control-panel-fuel":
set_y_range("fuel")
info_type, new_title = update_graph_data("fuel")
elif trigger_input == "control-panel-battery":
set_y_range("battery")
info_type, new_title = update_graph_data("battery")
# If no component has been selected, check for most recent info_type, to prevent graph from always resetting
else:
if info_type in [
"elevation",
"temperature",
"speed",
"latitude",
"longitude",
"fuel",
"battery",
]:
set_y_range(info_type)
            _, new_title = update_graph_data(info_type)
return [figure, new_data_config, new_title]
else:
return [old_figure, old_data, old_title]
new_data_config["info_type"] = info_type
return [figure, new_data_config, new_title]
# Callbacks Dropdown
@app.callback(
Output("satellite-name", "children"),
[Input("satellite-dropdown-component", "value")],
)
def update_satellite_name(val):
if val == "h45-k1":
return "Satellite\nH45-K1"
elif val == "l12-5":
return "Satellite\nL12-5"
else:
return ""
@app.callback(
Output("satellite-description", "children"),
[Input("satellite-dropdown-component", "value")],
)
def update_satellite_description(val):
text = "Select a satellite to view using the dropdown above."
if val == "h45-k1":
text = (
"H45-K1, also known as GPS IIR-9 and GPS SVN-45, is an American navigation satellite which forms part "
"of the Global Positioning System. It was the ninth Block IIR GPS satellite to be launched, out of "
"thirteen in the original configuration, and twenty one overall. It was built by <NAME>, using "
"the AS-4000 satellite bus. -168 was launched at 22:09:01 UTC on 31 March 2003, atop a Delta II carrier "
"rocket, flight number D297, flying in the 7925-9.5 configuration. The launch took place from Space "
"Launch Complex 17A at the Cape Canaveral Air Force Station, and placed H45-K1 into a transfer orbit. "
"The satellite raised itself into medium Earth orbit using a Star-37FM apogee motor."
)
elif val == "l12-5":
text = (
"L12-5, also known as NRO Launch 22 or NROL-22, is an American signals intelligence satellite, "
"operated by the National Reconnaissance Office. Launched in 2006, it has been identified as the first "
"in a new series of satellites which are replacing the earlier Trumpet spacecraft. L12-5 was launched "
"by Boeing, using a Delta IV carrier rocket flying in the Medium+(4,2) configuration. The rocket was the "
"first Delta IV to launch from Vandenberg Air Force Base, flying from Space Launch Complex 6, a launch "
"pad originally constructed as part of abandoned plans for manned launches from Vandenberg, originally "
"using Titan rockets, and later Space Shuttles. The launch also marked the first launch of an Evolved "
"Expendable Launch Vehicle from Vandenberg, and the first launch of an NRO payload on an EELV."
)
return text
# Callbacks Map
@app.callback(
Output("world-map", "figure"),
[
Input("interval", "n_intervals"),
Input("control-panel-toggle-map", "value"),
Input("satellite-dropdown-component", "value"),
],
[
State("world-map", "figure"),
State("store-data", "data"),
State("store-data-config", "data"),
],
)
def update_word_map(clicks, toggle, satellite_type, old_figure, data, data_config):
figure = old_figure
string_buffer = ""
# Set string buffer as well as drawing the satellite path
if data_config["satellite_type"] == 0:
string_buffer = "_0"
figure["data"][0]["lat"] = [df_gps_m_0["lat"][i] for i in range(3600)]
figure["data"][0]["lon"] = [df_gps_m_0["lon"][i] for i in range(3600)]
elif data_config["satellite_type"] == 1:
string_buffer = "_1"
figure["data"][0]["lat"] = [df_gps_m_1["lat"][i] for i in range(3600)]
figure["data"][0]["lon"] = [df_gps_m_1["lon"][i] for i in range(3600)]
else:
figure["data"][0]["lat"] = [df_gps_m["lat"][i] for i in range(3600)]
figure["data"][0]["lon"] = [df_gps_m["lon"][i] for i in range(3600)]
if not string_buffer:
figure["data"][1]["lat"] = [1.0]
figure["data"][1]["lon"] = [1.0]
elif clicks % 2 == 0:
figure["data"][1]["lat"] = [
float(data["minute_data" + string_buffer]["latitude"][-1])
]
figure["data"][1]["lon"] = [
float(data["minute_data" + string_buffer]["longitude"][-1])
]
# If toggle is off, hide path
if not toggle:
figure["data"][0]["lat"] = []
figure["data"][0]["lon"] = []
return figure
# Callbacks Components
@app.callback(
Output("control-panel-utc-component", "value"), [Input("interval", "n_intervals")]
)
def update_time(interval):
hour = time.localtime(time.time())[3]
hour = str(hour).zfill(2)
minute = time.localtime(time.time())[4]
minute = str(minute).zfill(2)
return hour + ":" + minute
@app.callback(
[
Output("control-panel-elevation-component", "value"),
Output("control-panel-temperature-component", "value"),
Output("control-panel-speed-component", "value"),
Output("control-panel-fuel-component", "value"),
Output("control-panel-battery-component", "value"),
],
[Input("interval", "n_intervals"), Input("satellite-dropdown-component", "value")],
[State("store-data-config", "data"), State("store-data", "data")],
)
def update_non_gps_component(clicks, satellite_type, data_config, data):
string_buffer = ""
if data_config["satellite_type"] == 0:
string_buffer = "_0"
if data_config["satellite_type"] == 1:
string_buffer = "_1"
new_data = []
components_list = ["elevation", "temperature", "speed", "fuel", "battery"]
# Update each graph value
for component in components_list:
new_data.append(data["minute_data" + string_buffer][component][-1])
return new_data
@app.callback(
[
Output("control-panel-latitude-component", "value"),
Output("control-panel-longitude-component", "value"),
],
[Input("interval", "n_intervals"), Input("satellite-dropdown-component", "value")],
[State("store-data-config", "data"), State("store-data", "data")],
)
def update_gps_component(clicks, satellite_type, data_config, data):
string_buffer = ""
if data_config["satellite_type"] == 0:
string_buffer = "_0"
if data_config["satellite_type"] == 1:
string_buffer = "_1"
new_data = []
for component in ["latitude", "longitude"]:
val = list(data["minute_data" + string_buffer][component][-1])
if val[0] == "-":
new_data.append("0" + "".join(val[1::]))
else:
new_data.append("".join(val))
return new_data
@app.callback(
[
Output("control-panel-latitude-component", "color"),
Output("control-panel-longitude-component", "color"),
],
[Input("interval", "n_intervals"), Input("satellite-dropdown-component", "value")],
[State("store-data-config", "data"), State("store-data", "data")],
)
def update_gps_color(clicks, satellite_type, data_config, data):
string_buffer = ""
if data_config["satellite_type"] == 0:
string_buffer = "_0"
if data_config["satellite_type"] == 1:
string_buffer = "_1"
new_data = []
for component in ["latitude", "longitude"]:
value = float(data["minute_data" + string_buffer][component][-1])
if value < 0:
new_data.append("#ff8e77")
else:
new_data.append("#fec036")
return new_data
@app.callback(
Output("control-panel-communication-signal", "value"),
[Input("interval", "n_intervals"), Input("satellite-dropdown-component", "value")],
)
def update_communication_component(clicks, satellite_type):
if clicks % 2 == 0:
return False
else:
return True
if __name__ == "__main__":
app.run_server(debug=True)
```
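One detail worth calling out from `update_data` above: every telemetry series is kept at a fixed 60-sample window by appending one value per interval and slicing off the oldest entry. Stripped of the Dash plumbing, the window update is just the following (all values invented for illustration):

```python
# Pretend minute-level history plus one newly arrived reading.
readings = list(range(60))
new_reading = 123

readings.append(new_reading)
readings = readings[1:61]  # drop the oldest sample, keep exactly 60

assert len(readings) == 60 and readings[-1] == new_reading
```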
#### File: apps/dash-daq-tektronix350/osc_tds350.py
```python
import visa
import numpy as np
oscilloscope = None
# Adapted from code seen here:
# https://github.com/baroobob/TektronixTDS2024B/blob/master/TektronixTDS2024B.py
def get_data():
global oscilloscope
rm = visa.ResourceManager()
oscilloscope = rm.open_resource("GPIB0::1::INSTR")
write("DATA:SOURCE CH1")
write("DATA:WIDTH 2")
write("DATa:ENCdg SRIbinary")
ymult = float(query("WFMPRE:CH1:YMULT?"))
yzero = float(query("WFMPRE:CH1:YZERO?"))
yoff = float(query("WFMPRE:CH1:YOFF?"))
xincr = float(query("WFMPRE:CH1:XINCR?"))
write("AUTOSET EXECUTE")
write("CURVE?")
data = oscilloscope.read_raw()
headerlen = 2 + int(data[1])
header = data[:headerlen]
ADC_wave = data[headerlen:-1]
    ADC_wave = np.frombuffer(ADC_wave, dtype=np.int16)
y = (ADC_wave - yoff) * ymult + yzero
x = np.arange(0, xincr * len(y), xincr)
oscilloscope.close()
return [
{
"x": x,
"y": y,
"type": "line",
"showscale": False,
"colorscale": [[0, "rgba(255, 255, 255,0)"], [1, "rgba(0,0,255,1)"]],
}
]
def get_data_tuple():
global oscilloscope
rm = visa.ResourceManager()
oscilloscope = rm.open_resource("GPIB0::1::INSTR")
write("DATA:SOURCE CH1")
write("DATA:WIDTH 2")
write("DATa:ENCdg SRIbinary")
ymult = float(query("WFMPRE:CH1:YMULT?"))
yzero = float(query("WFMPRE:CH1:YZERO?"))
yoff = float(query("WFMPRE:CH1:YOFF?"))
xincr = float(query("WFMPRE:CH1:XINCR?"))
write("AUTOSET EXECUTE")
write("CURVE?")
data = oscilloscope.read_raw()
headerlen = 2 + int(data[1])
header = data[:headerlen]
ADC_wave = data[headerlen:-1]
    ADC_wave = np.frombuffer(ADC_wave, dtype=np.int16)
y = (ADC_wave - yoff) * ymult + yzero
x = np.arange(0, xincr * len(y), xincr)
return (x, y)
def query(command):
return oscilloscope.query(command)
def write(command):
oscilloscope.write(command)
# page 204
```
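The conversion in `get_data` / `get_data_tuple` scales the raw CURVE? samples with the scope's preamble values: volts = (raw - YOFF) * YMULT + YZERO, with the time axis built from XINCR. A small offline sketch of the same scaling, using made-up preamble values so no instrument is needed:

```python
import numpy as np

ymult, yzero, yoff, xincr = 4.0e-4, 0.0, -27.0, 4.0e-6  # assumed preamble values
adc_wave = np.array([-120, -60, 0, 60, 120], dtype=np.int16)  # fake CURVE? samples

y = (adc_wave - yoff) * ymult + yzero    # volts
x = np.arange(0, xincr * len(y), xincr)  # seconds

print(list(zip(x, y)))
```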
#### File: apps/dash-deck-explorer/app.py
```python
from importlib import import_module
import inspect
from textwrap import dedent
import os
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from tqdm import tqdm
def Header(name, app):
title = html.H1(name, style={"margin-top": 5})
logo = html.Img(
src=app.get_asset_url("dash-logo.png"), style={"float": "right", "height": 60}
)
link = html.A(logo, href="https://plotly.com/dash/")
return dbc.Row([dbc.Col(title, md=8), dbc.Col(link, md=4)])
def format_demo_name(demo):
return demo.replace("usage-", "").replace("-", " ").title()
ignored_demos = ["usage-events.py", "usage-style-prop.py"]
deck_demos = [
n.replace(".py", "").replace("usage-", "")
for n in sorted(os.listdir("./demos"))
if ".py" in n and n not in ignored_demos
]
deck_modules = {demo: import_module(f"demos.usage-{demo}") for demo in tqdm(deck_demos)}
print("Demos Loaded:", deck_demos)
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.DARKLY])
server = app.server
app_selection = dbc.FormGroup(
[
dbc.Label("Select Demo", width=3),
dbc.Col(
dbc.Select(
id="demo-selection",
options=[
{"label": demo.replace("-", " ").title(), "value": demo}
for demo in deck_demos
],
className="form-control-plaintext",
),
width=9,
),
],
row=True,
)
tab_style = {"height": "calc(100vh - 230px)", "padding": "15px"}
# tab_style = {'max-height': 'calc(100vh - 210px)'}
tabs = dbc.Tabs(
[
dbc.Tab(dcc.Markdown(id="description", style=tab_style), label="Description"),
dbc.Tab(dcc.Markdown(id="source-code", style=tab_style), label="Source Code"),
]
)
layout = [
Header("Dash Deck Explorer", app),
html.Br(),
dcc.Location(id="url", refresh=False),
dbc.Row(
[
dbc.Col(
dbc.Card(
id="deck-card", style={"height": "calc(100vh - 110px)"}, body=True
),
md=6,
),
dbc.Col([app_selection, tabs], md=6),
]
),
]
app.layout = dbc.Container(layout, fluid=True)
@app.callback(
Output("url", "pathname"),
Input("demo-selection", "value"),
State("url", "pathname"),
)
def update_url(name, pathname):
if name is None:
if pathname in ["/dash-deck-explorer/", None, "/dash-deck-explorer"]:
name = deck_demos[0]
else:
return dash.no_update
return "/dash-deck-explorer/" + name
@app.callback(
[
Output("deck-card", "children"),
Output("description", "children"),
Output("source-code", "children"),
],
Input("url", "pathname"),
)
def update_demo(pathname):
if pathname in ["/dash-deck-explorer/", None, "/"]:
return dash.no_update
name = pathname.split("/")[-1]
module = deck_modules[name]
deck_component = module.app.layout
desc = module.__doc__
code = f"```\n{inspect.getsource(module)}\n```"
end = dedent(
f"""
-----
* Source Code on GitHub: [Link to demo](https://github.com/plotly/dash-deck/blob/master/demos/usage-{name}.py)
* Dash Deck for enterprises: [Contact us](https://plotly.com/contact-us)
* Download it now: [PyPi](https://pypi.org/project/dash-deck)
* About Dash Deck: [Readme](https://github.com/plotly/dash-deck/blob/master/README.md) | [Announcement](https://community.plotly.com/)
"""
)
return deck_component, desc + end, code
if __name__ == "__main__":
app.run_server(debug=True)
```
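The explorer above assumes only two things about each `demos/usage-<name>.py` module: a module docstring (rendered in the Description tab) and an `app` whose `layout` holds the deck component. A hypothetical minimal demo module satisfying that contract could look like the sketch below; the file name and the DeckGL arguments are placeholders, not taken from the real demos.

```python
"""
Hypothetical demos/usage-empty.py: this docstring is what the explorer's
Description tab renders, and `app.layout` is what the deck card embeds.
"""
import dash
import dash_deck
import dash_html_components as html

app = dash.Dash(__name__)

# The explorer imports this module and reads `app.layout` directly.
app.layout = html.Div(dash_deck.DeckGL(id="deck-gl", tooltip=True))

if __name__ == "__main__":
    app.run_server(debug=True)
```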
#### File: apps/dash-drug-discovery/app.py
```python
import dash
import pandas as pd
import pathlib
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output
from dash.exceptions import PreventUpdate
from helpers import make_dash_table, create_plot
app = dash.Dash(
__name__,
meta_tags=[{"name": "viewport", "content": "width=device-width, initial-scale=1"}],
)
server = app.server
DATA_PATH = pathlib.Path(__file__).parent.joinpath("data").resolve()
# read from datasheet
df = pd.read_csv(DATA_PATH.joinpath("small_molecule_drugbank.csv")).drop(
["Unnamed: 0"], axis=1
)
STARTING_DRUG = "Levobupivacaine"
DRUG_DESCRIPTION = df.loc[df["NAME"] == STARTING_DRUG]["DESC"].iloc[0]
DRUG_IMG = df.loc[df["NAME"] == STARTING_DRUG]["IMG_URL"].iloc[0]
FIGURE = create_plot(
x=df["PKA"],
y=df["LOGP"],
z=df["SOL"],
size=df["MW"],
color=df["MW"],
name=df["NAME"],
)
app.layout = html.Div(
[
html.Div(
[html.Img(src=app.get_asset_url("dash-logo.png"))], className="app__banner"
),
html.Div(
[
html.Div(
[
html.Div(
[
html.H3(
"dash for drug discovery",
className="uppercase title",
),
html.Span("Hover ", className="uppercase bold"),
html.Span(
"over a drug in the graph to see its structure."
),
html.Br(),
html.Span("Select ", className="uppercase bold"),
html.Span(
"a drug in the dropdown to add it to the drug candidates at the bottom."
),
]
)
],
className="app__header",
),
html.Div(
[
dcc.Dropdown(
id="chem_dropdown",
multi=True,
value=[STARTING_DRUG],
options=[{"label": i, "value": i} for i in df["NAME"]],
)
],
className="app__dropdown",
),
html.Div(
[
html.Div(
[
dcc.RadioItems(
id="charts_radio",
options=[
{"label": "3D Scatter", "value": "scatter3d"},
{"label": "2D Scatter", "value": "scatter"},
{
"label": "2D Histogram",
"value": "histogram2d",
},
],
labelClassName="radio__labels",
inputClassName="radio__input",
value="scatter3d",
className="radio__group",
),
dcc.Graph(
id="clickable-graph",
hoverData={"points": [{"pointNumber": 0}]},
figure=FIGURE,
),
],
className="two-thirds column",
),
html.Div(
[
html.Div(
[
html.Img(
id="chem_img",
src=DRUG_IMG,
className="chem__img",
)
],
className="chem__img__container",
),
html.Div(
[
html.A(
STARTING_DRUG,
id="chem_name",
href="https://www.drugbank.ca/drugs/DB01002",
target="_blank",
),
html.P(DRUG_DESCRIPTION, id="chem_desc"),
],
className="chem__desc__container",
),
],
className="one-third column",
),
],
className="container card app__content bg-white",
),
html.Div(
[
html.Table(
make_dash_table([STARTING_DRUG], df),
id="table-element",
className="table__container",
)
],
className="container bg-white p-0",
),
],
className="app__container",
),
]
)
def df_row_from_hover(hoverData):
""" Returns row for hover point as a Pandas Series. """
try:
point_number = hoverData["points"][0]["pointNumber"]
molecule_name = str(FIGURE["data"][0]["text"][point_number]).strip()
return df.loc[df["NAME"] == molecule_name]
except KeyError as error:
print(error)
return pd.Series()
@app.callback(
Output("clickable-graph", "figure"),
[Input("chem_dropdown", "value"), Input("charts_radio", "value")],
)
def highlight_molecule(chem_dropdown_values, plot_type):
"""
Selected chemical dropdown values handler.
:params chem_dropdown_values: selected dropdown values
:params plot_type: selected plot graph
"""
return create_plot(
x=df["PKA"],
y=df["LOGP"],
z=df["SOL"],
size=df["MW"],
color=df["MW"],
name=df["NAME"],
markers=chem_dropdown_values,
plot_type=plot_type,
)
@app.callback(Output("table-element", "children"), [Input("chem_dropdown", "value")])
def update_table(chem_dropdown_value):
"""
Update the table rows.
:params chem_dropdown_values: selected dropdown values
"""
return make_dash_table(chem_dropdown_value, df)
@app.callback(
[
Output("chem_name", "children"),
Output("chem_name", "href"),
Output("chem_img", "src"),
Output("chem_desc", "children"),
],
[Input("clickable-graph", "hoverData")],
)
def chem_info_on_hover(hoverData):
"""
Display chemical information on graph hover.
Update the image, link, description.
:params hoverData: data on graph hover
"""
if hoverData is None:
raise PreventUpdate
try:
row = df_row_from_hover(hoverData)
if row.empty:
raise Exception
return (
row["NAME"].iloc[0],
row["PAGE"].iloc[0],
row["IMG_URL"].iloc[0],
row["DESC"].iloc[0],
)
except Exception as error:
print(error)
raise PreventUpdate
if __name__ == "__main__":
app.run_server(debug=True)
```
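`make_dash_table` and `create_plot` live in `helpers.py`, which is not shown here. Based purely on how `make_dash_table` is called above and on the NAME/PAGE/IMG_URL/DESC columns the hover callback reads, a hypothetical reconstruction (the real helper may differ) could look like:

```python
import dash_html_components as html

def make_dash_table(selection, df):
    """Hypothetical sketch: build html.Tr rows for the selected drug names."""
    rows = []
    for drug in selection:
        row = df.loc[df["NAME"] == drug]
        rows.append(
            html.Tr(
                [
                    html.Td(html.A(row["NAME"].iloc[0], href=row["PAGE"].iloc[0])),
                    html.Td(html.Img(src=row["IMG_URL"].iloc[0])),
                    html.Td(row["DESC"].iloc[0]),
                ]
            )
        )
    return rows
```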
#### File: apps/dash-fashion-mnist-explorer/app.py
```python
import base64
from io import BytesIO
import numpy as np
from keras.models import load_model
from PIL import Image
import dash
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output, State
import pickle
import plotly.express as px
from helpers import load_mnist, parse_image, numpy_to_b64, create_img, label_mapping
train_images, train_labels = load_mnist("./fashion", subset="train")
test_images, test_labels = load_mnist("./fashion", subset="test")
all_labels = np.concatenate((train_labels, test_labels))
X_train = train_images.reshape(60000, 28, 28, 1)
X_test = test_images.reshape(10000, 28, 28, 1)
all_images = np.concatenate((X_train, X_test), axis=0)
train_X_hat = np.load("trained_data/train_tsne.npy")
test_X_hat = np.load("trained_data/test_tsne.npy")
all_X_hat = np.load("trained_data/all_images_tsne.npy")
model = load_model("trained_data/fashion_mnist_cnn.h5")
intro_text = """
This app applies T-SNE on the images from the [fashion mnist
dataset](https://github.com/zalandoresearch/fashion-mnist), which reduces each
image to a two dimensional embedding to visualize the similarity between
images. Clusters represent similar images, and the greater the distance between
two points, the less similar the images are. The app also lets you predict the class of each image using a simple
convolutional neural network (CNN). The T-SNE embeddings were generated using [RAPIDS
cuML](https://github.com/rapidsai/cuml), and the CNN was trained using Keras.
Hover over each point in the T-SNE graph to see the image it represents. You can
click an individual point to see the CNN's prediction for that point, as well
as the ground-truth label. You can also upload your own image to see how the CNN would classify it, as well as
to display the image's approximate location in the T-SNE space (new embeddings
are approximated using a linear model fit on the mnist images, using the
original mnist T-SNE embeddings as the dependent variable).
"""
def create_tsne_graph(data, uploaded_point=None):
colors = px.colors.qualitative.Pastel
traces = []
for i, key in enumerate(label_mapping.keys()):
# Training data
idx = np.where(train_labels == key)
x = all_X_hat[idx, 0].flatten()
y = all_X_hat[idx, 1].flatten()
if data in ["Train", "All"]:
opacity = 0.9
hoverinfo = "all"
showlegend = True
visible = True
else:
opacity = 0.5
hoverinfo = "none"
showlegend = False
visible = "legendonly"
trace = {
"x": x,
"y": y,
"mode": "markers",
"type": "scattergl",
"marker": {"color": colors[i], "size": 3},
"name": label_mapping[key],
"text": label_mapping[key],
"customdata": idx[0],
"opacity": opacity,
"hoverinfo": hoverinfo,
"visible": visible,
"showlegend": showlegend,
"selected": {"marker": {"size": 10, "color": "black"}},
}
traces.append(trace)
for i, key in enumerate(label_mapping.keys()):
# Test data
idx = np.where(test_labels == key)
x = all_X_hat[(idx[0] + len(train_labels)), 0].flatten()
y = all_X_hat[(idx[0] + len(train_labels)), 1].flatten()
if data in ["Test", "All"]:
opacity = 0.9
hoverinfo = "all"
showlegend = True if data == "Test" else False
visible = True
else:
opacity = 0.5
hoverinfo = "none"
showlegend = False
visible = "legendonly"
trace = {
"x": x,
"y": y,
"mode": "markers",
"type": "scattergl",
"marker": {"color": colors[i], "size": 3},
"name": label_mapping[key],
"text": label_mapping[key],
"customdata": idx[0] + len(train_labels),
"opacity": opacity,
"hoverinfo": hoverinfo,
"visible": visible,
"showlegend": showlegend,
"selected": {"marker": {"size": 10, "color": "black"}},
}
traces.append(trace)
annotation = []
if uploaded_point:
annotation.append(
{
"x": uploaded_point[0][0],
"y": uploaded_point[0][1],
"xref": "x",
"yref": "y",
"text": "Predicted Embedding for Uploaded Image",
"showarrow": True,
"arrowhead": 1,
"ax": 10,
"ay": -40,
"font": {"size": 20},
}
)
layout = {
"xaxis": {"visible": False},
"yaxis": {"visible": False},
"clickmode": "event+select",
"annotations": annotation,
}
figure = {"data": traces, "layout": layout}
return figure
app = dash.Dash(name=__name__)
server = app.server
app.css.config.serve_locally = False
app.config.suppress_callback_exceptions = True
header = html.Div(
id="app-header",
children=[
html.Img(src=app.get_asset_url("dash-logo.png"), className="logo"),
"Fashion MNIST Explorer: T-SNE and CNN",
],
)
app.layout = html.Div(
children=[
header,
html.Br(),
html.Details(
id="intro-text",
children=[html.Summary(html.B("About This App")), dcc.Markdown(intro_text)],
),
# html.Div(html.Div(id="intro-text", children=dcc.Markdown(intro_text),),),
html.Div(
id="app-body",
children=[
html.Div(
id="control-card",
children=[
html.Span(
className="control-label",
children="Display Train or Test Data",
),
dcc.Dropdown(
id="train-test-dropdown",
className="control-dropdown",
options=[
{"label": i, "value": i}
for i in ["Train", "Test", "All"]
],
value="Train",
),
html.Span(
className="control-label", children="Upload an Image"
),
dcc.Upload(
id="img-upload",
className="upload-component",
children=html.Div(
["Drag and Drop or ", html.A("Select Files")]
),
),
html.Div(id="output-img-upload"),
],
),
html.Div(
style={"width": "75vw"},
children=[
html.Div(
id="tsne-graph-div",
children=[
html.Div(
id="tsne-graph-outer",
children=[
# html.Div(
# id="intro-text",
# children=dcc.Markdown(intro_text),
# ),
html.H3(
className="graph-title",
children="Fashion MNIST Images Reduced to 2 Dimensions with T-SNE",
),
dcc.Graph(
id="tsne-graph",
figure=create_tsne_graph("Test"),
),
],
)
],
),
html.Div(
id="image-card-div",
children=[
html.Div(
id="hover-point-outer",
className="img-card",
children=[
html.Div(
"Hover Point:", style={"height": "20%"}
),
html.Br(),
html.Br(),
html.Br(),
html.Img(
id="hover-point-graph", className="image"
),
],
),
html.Div(
id="prediction-div",
className="img-card",
children=[
html.Div(
id="selected-data-graph-outer",
children=[
html.Div(
children=[
html.Div("Selected Point:"),
html.Div(
id="prediction",
children=[
"Click on a point to display the Network's prediction",
html.Br(),
html.Br(),
],
),
],
style={"height": "20%"},
),
html.Br(),
html.Img(
id="selected-data-graph",
className="image",
src=create_img(np.zeros((28, 28))),
),
],
)
],
),
],
),
],
),
],
),
]
)
@app.callback(
Output("output-img-upload", "children"),
[Input("img-upload", "contents")],
[State("img-upload", "filename"), State("img-upload", "last_modified")],
)
def display_uploaded_img(contents, fname, date):
if contents is not None:
original_img, resized_img = parse_image(contents, fname, date)
img = np.expand_dims(resized_img, axis=0)
prediction_array = model.predict(img)
prediction = np.argmax(prediction_array)
children = [
"Your uploaded image: ",
html.Img(className="image", src=original_img),
"Image fed the model: ",
html.Img(className="image", src=create_img(resized_img)),
f"The model thinks this is a {label_mapping[prediction]}",
html.Button(
id="clear-button", children="Remove Uploaded Image", n_clicks=0
),
]
return children
@app.callback(Output("img-upload", "contents"), [Input("clear-button", "n_clicks")])
def clear_upload(n_clicks):
if n_clicks >= 1:
return None
raise dash.exceptions.PreventUpdate
@app.callback(
Output("tsne-graph", "figure"),
[Input("train-test-dropdown", "value"), Input("img-upload", "contents")],
[State("img-upload", "filename"), State("img-upload", "last_modified")],
)
def display_train_test(value, contents, fname, date):
embedding_prediction = None
if contents is not None:
original_img, resized_img = parse_image(contents, fname, date)
linear_model = pickle.load(
open("trained_data/linear_model_embeddings.sav", "rb")
)
embedding_prediction = linear_model.predict(resized_img.reshape(1, -1)).tolist()
return create_tsne_graph(value, embedding_prediction)
@app.callback(Output("hover-point-graph", "src"), [Input("tsne-graph", "hoverData")])
def display_hover_point(hoverData):
if not hoverData:
return create_img(train_images[0])
idx = hoverData["points"][0]["customdata"]
return create_img(all_images[idx])
@app.callback(
[Output("selected-data-graph", "src"), Output("prediction", "children")],
[Input("tsne-graph", "clickData")],
)
def display_selected_point(clickData):
if not clickData:
raise dash.exceptions.PreventUpdate
idx = clickData["points"][0]["customdata"]
img = np.expand_dims(all_images[idx], axis=0)
prediction_array = model.predict(img)
prediction = np.argmax(prediction_array)
probability = np.round(prediction_array[0, prediction] * 100, 2)
ground_truth = all_labels[idx]
correct = prediction == ground_truth
if correct:
color = "green"
else:
color = "red"
return [
create_img(all_images[idx]),
[
f"prediction: {label_mapping[prediction]} ({probability}% certainty)",
html.Br(),
f"actual: {label_mapping[ground_truth]}",
],
]
if __name__ == "__main__":
app.run_server(debug=False)
```
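The upload callback above loads `trained_data/linear_model_embeddings.sav` to place an uploaded image into the T-SNE plane, but that file is not produced in this script. Assuming it was fit with scikit-learn as the intro text describes (flattened pixels mapped to the precomputed 2-D embeddings), it could be generated roughly like this; the choice of `LinearRegression` is an assumption:

```python
import pickle
import numpy as np
from sklearn.linear_model import LinearRegression
from helpers import load_mnist

train_images, _ = load_mnist("./fashion", subset="train")
test_images, _ = load_mnist("./fashion", subset="test")
all_images = np.concatenate(
    (train_images.reshape(60000, -1), test_images.reshape(10000, -1)), axis=0
)
all_X_hat = np.load("trained_data/all_images_tsne.npy")  # precomputed T-SNE, shape (N, 2)

# Assumption: a plain least-squares map from pixels to embedding coordinates.
linear_model = LinearRegression().fit(all_images, all_X_hat)

with open("trained_data/linear_model_embeddings.sav", "wb") as f:
    pickle.dump(linear_model, f)
```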
#### File: apps/dash-floris-gch/app.py
```python
import base64
from io import BytesIO
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import floris.tools as wfct
import matplotlib.pyplot as plt
import reusable_components as rc # see reusable_components.py
# ############ Create helper functions ############
def mpl_to_b64(fig, format="png", dpi=300, **kwargs):
b_io = BytesIO()
fig.savefig(b_io, format=format, bbox_inches="tight", dpi=dpi, **kwargs)
b64_enc = base64.b64encode(b_io.getvalue()).decode("utf-8")
return f"data:image/{format};base64," + b64_enc
def build_visualizations(x_loc, y_loc, yaw_1, wd, gch, minSpeed=4, maxSpeed=8.0):
fi = wfct.floris_interface.FlorisInterface("./data/example_input.json")
fi.set_gch(gch)
fi.reinitialize_flow_field(
wind_direction=wd, layout_array=((0, 126 * 7, 126 * 14), (0, 0, 0))
)
fi.calculate_wake(yaw_angles=[yaw_1, 0, 0])
# Horizontal plane
fig, ax = plt.subplots()
wfct.visualization.visualize_cut_plane(
fi.get_hor_plane(), ax=ax, minSpeed=minSpeed, maxSpeed=maxSpeed
)
ax.axhline(y_loc, color="w", ls="--", lw=1)
ax.axvline(x_loc, color="w", ls="--", lw=1)
horiz_b64 = mpl_to_b64(fig)
plt.close(fig)
# Cross (x-normal) plane
fig, ax = plt.subplots()
wfct.visualization.visualize_cut_plane(
fi.get_cross_plane(x_loc=x_loc), ax=ax, minSpeed=minSpeed, maxSpeed=maxSpeed
)
wfct.visualization.reverse_cut_plane_x_axis_in_plot(ax)
x_plane_b64 = mpl_to_b64(fig)
plt.close(fig)
# Cross (y-normal) plane
fig, ax = plt.subplots()
wfct.visualization.visualize_cut_plane(
fi.get_y_plane(y_loc=y_loc), ax=ax, minSpeed=minSpeed, maxSpeed=maxSpeed
)
wfct.visualization.reverse_cut_plane_x_axis_in_plot(ax)
y_plane_b64 = mpl_to_b64(fig)
plt.close(fig)
return horiz_b64, x_plane_b64, y_plane_b64
# ############ Initialize app ############
app = dash.Dash(__name__, external_stylesheets=[rc.MATERALIZE_CSS])
server = app.server
# ############ Build components and layouts ############
navbar = html.Nav(
html.Div(
className="nav-wrapper teal",
children=[
html.Img(
src=app.get_asset_url("dash-logo.png"),
style={"float": "right", "height": "100%", "padding-right": "15px"},
),
html.A(
"GCH and Cut Plane Visualization in FLORIS",
className="brand-logo",
href="https://plotly.com/dash/",
style={"padding-left": "15px"},
),
],
)
)
controls = [
rc.CustomSlider(id="wind-direction", min=250, max=290, label="Wind Direction"),
rc.CustomSlider(id="yaw-angle", min=-30, max=30, label="Yaw angle T1"),
rc.CustomSlider(
id="x-loc", min=0, max=3000, value=500, label="X Normal Plane Intercept"
),
rc.CustomSlider(id="y-loc", min=-100, max=100, label="Y Normal Plane Intercept"),
]
left_section = rc.Card(
rc.CardContent(
[
rc.CardTitle("Horizontal Cut Plane"),
html.Img(id="gch-horizontal", style={"width": "100%"}),
rc.CardTitle("Cross (X-Normal) Cut Plane"),
html.Img(id="gch-x-normal", style={"width": "100%"}),
rc.CardTitle("Cross (Y-Normal) Cut Plane"),
html.Img(id="gch-y-normal", style={"width": "100%"}),
]
)
)
right_section = rc.Card(
rc.CardContent(
[
rc.CardTitle("Horizontal Cut Plane"),
html.Img(id="no-gch-horizontal", style={"width": "100%"}),
rc.CardTitle("Cross (X-Normal) Cut Plane"),
html.Img(id="no-gch-x-normal", style={"width": "100%"}),
rc.CardTitle("Cross (Y-Normal) Cut Plane"),
html.Img(id="no-gch-y-normal", style={"width": "100%"}),
]
)
)
app.layout = html.Div(
style={"--slider_active": "teal"},
# className="container",
children=[
navbar,
html.Br(),
rc.Row(
rc.Col(
rc.Card(rc.CardContent(rc.Row([rc.Col(c, width=3) for c in controls]))),
width=12,
)
),
rc.Row(
[
rc.Col([html.H4("Results with GCH"), left_section], width=6),
rc.Col([html.H4("Results without GCH"), right_section], width=6),
]
),
],
)
@app.callback(
Output("gch-horizontal", "src"),
Output("gch-x-normal", "src"),
Output("gch-y-normal", "src"),
Input("x-loc", "value"),
Input("y-loc", "value"),
Input("yaw-angle", "value"),
Input("wind-direction", "value"),
)
def gch_update(x_loc, y_loc, yaw_1, wd):
return build_visualizations(x_loc, y_loc, yaw_1, wd, gch=True)
@app.callback(
Output("no-gch-horizontal", "src"),
Output("no-gch-x-normal", "src"),
Output("no-gch-y-normal", "src"),
Input("x-loc", "value"),
Input("y-loc", "value"),
Input("yaw-angle", "value"),
Input("wind-direction", "value"),
)
def no_gch_update(x_loc, y_loc, yaw_1, wd):
return build_visualizations(x_loc, y_loc, yaw_1, wd, gch=False)
if __name__ == "__main__":
app.run_server(debug=True, threaded=False, processes=2)
```
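`reusable_components.py` is not included above. Judging from the layout code, its helpers are thin wrappers around Materialize CSS grid and card classes; a hypothetical sketch of a few of them follows (the real implementations may differ):

```python
import dash_html_components as html

def Row(children=None, **kwargs):
    return html.Div(children, className="row", **kwargs)

def Col(children=None, width=6, **kwargs):
    return html.Div(children, className=f"col s{width}", **kwargs)

def Card(children=None, **kwargs):
    return html.Div(children, className="card", **kwargs)

def CardContent(children=None, **kwargs):
    return html.Div(children, className="card-content", **kwargs)

def CardTitle(children=None, **kwargs):
    return html.Span(children, className="card-title", **kwargs)
```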
#### File: apps/dash-interest-rate/utils.py
```python
import dash
import dash_html_components as html
import dash_core_components as dcc
import dash_bootstrap_components as dbc
def Header(name, app):
title = html.H2(name, style={"margin-top": 7})
logo = html.Img(
src=app.get_asset_url("dash-logo.png"), style={"float": "right", "height": 60}
)
link = html.A(logo, href="https://plotly.com/dash/")
btn_style = {"margin-top": "13px", "float": "right", "margin-right": "10px"}
demo_btn = html.A(
dbc.Button("Enterprise Demo", style=btn_style, color="primary"),
href="https://plotly.com/get-demo/",
)
code_btn = html.A(
dbc.Button("Source Code", style=btn_style, color="secondary"),
href="https://github.com/plotly/dash-sample-apps/tree/main/apps/dash-interest-rate",
)
return dbc.Row([dbc.Col(title, md=7), dbc.Col([link, demo_btn, code_btn], md=5)])
def OptionMenu(values, label, **kwargs):
options = [{"label": s.replace("_", " ").capitalize(), "value": s} for s in values]
kwargs["value"] = kwargs.get("value", values[0])
if len(options) <= 4:
component = dbc.RadioItems
kwargs["inline"] = True
else:
component = dbc.Select
return dbc.FormGroup([dbc.Label(label), component(options=options, **kwargs)])
def CustomRangeSlider(values, label, **kwargs):
values = sorted(values)
marks = {i: f"{i//1000}k" for i in values}
return dbc.FormGroup(
[
dbc.Label(label),
dcc.RangeSlider(
min=values[0],
max=values[-1],
step=1000,
value=[values[1], values[-2]],
marks=marks,
**kwargs,
),
]
)
def get_unique(connection, db, col):
query = f"""
SELECT DISTINCT {col}
FROM {db}.PUBLIC.LOAN_CLEAN;
"""
return [x[0] for x in connection.execute(query).fetchall()]
def get_range(connection, db, col):
query = f"""
SELECT MIN({col}), MAX({col})
FROM {db}.PUBLIC.LOAN_CLEAN;
"""
return connection.execute(query).fetchall()[0]
```
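A minimal usage sketch for the helpers above, assuming this file is importable as `utils`; the ids, labels, and slider values are invented for illustration:

```python
import dash
import dash_bootstrap_components as dbc

from utils import Header, OptionMenu, CustomRangeSlider

app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
app.layout = dbc.Container(
    [
        Header("Interest Rate Explorer", app),
        OptionMenu(["fully_paid", "charged_off"], "Loan status", id="status"),
        CustomRangeSlider([0, 10000, 20000, 30000, 40000], "Loan amount", id="amount"),
    ]
)

if __name__ == "__main__":
    app.run_server(debug=True)
```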
#### File: apps/dash-live-model-training/demo_utils.py
```python
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
from dash.dependencies import Input, Output, State
import pathlib
# get relative data folder
PATH = pathlib.Path(__file__).parent
DATA_PATH = PATH.joinpath("data").resolve()
def demo_explanation(demo_mode):
if demo_mode:
# Markdown files
with open(PATH.joinpath("demo.md"), "r") as file:
demo_md = file.read()
return html.Div(
html.Div([dcc.Markdown(demo_md, className="markdown")]),
style={"margin": "10px"},
)
def demo_callbacks(app, demo_mode):
if demo_mode:
@app.server.before_first_request
def load_demo_run_logs():
global data_dict, demo_md
names = [
"step",
"train accuracy",
"val accuracy",
"train cross entropy",
"val cross entropy",
]
data_dict = {
"softmax": {
"cifar": pd.read_csv(
DATA_PATH.joinpath("cifar_softmax_run_log.csv"), names=names
),
"mnist": pd.read_csv(
DATA_PATH.joinpath("mnist_softmax_run_log.csv"), names=names
),
"fashion": pd.read_csv(
DATA_PATH.joinpath("fashion_softmax_run_log.csv"), names=names
),
},
"cnn": {
"cifar": pd.read_csv(
DATA_PATH.joinpath("cifar_cnn_run_log.csv"), names=names
),
"mnist": pd.read_csv(
DATA_PATH.joinpath("mnist_cnn_run_log.csv"), names=names
),
"fashion": pd.read_csv(
DATA_PATH.joinpath("fashion_cnn_run_log.csv"), names=names
),
},
}
@app.callback(
Output("storage-simulated-run", "data"),
[Input("interval-simulated-step", "n_intervals")],
[
State("dropdown-demo-dataset", "value"),
State("dropdown-simulation-model", "value"),
],
)
def simulate_run(n_intervals, demo_dataset, simulation_model):
if simulation_model and demo_dataset and n_intervals > 0:
step = n_intervals * 5
run_logs = data_dict[simulation_model][demo_dataset]
run_below_steps = run_logs[run_logs["step"] <= step]
json = run_below_steps.to_json(orient="split")
return json
@app.callback(
Output("interval-simulated-step", "n_intervals"),
[
Input("dropdown-demo-dataset", "value"),
Input("dropdown-simulation-model", "value"),
],
)
def reset_interval_simulated_step(*_):
return 0
@app.callback(
Output("run-log-storage", "data"),
[Input("interval-log-update", "n_intervals")],
[State("storage-simulated-run", "data")],
)
def get_run_log(_, simulated_run):
            if simulated_run:
return simulated_run
@app.callback(
Output("div-total-step-count", "children"),
[Input("dropdown-demo-dataset", "value")],
)
def total_step_count(dataset_name):
if dataset_name is not None:
dataset = data_dict["softmax"][dataset_name]
return html.H6(
f"Total Steps: {dataset['step'].iloc[-1]}",
style={"margin-top": "3px", "float": "right"},
)
```
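`demo_callbacks` only registers callbacks; it assumes the main app's layout already contains components with the ids referenced above. A hypothetical stub of those layout pieces (interval periods and dropdown options are guesses):

```python
import dash_core_components as dcc
import dash_html_components as html

demo_layout_stub = html.Div(
    [
        dcc.Dropdown(
            id="dropdown-demo-dataset",
            options=[{"label": d, "value": d} for d in ["mnist", "fashion", "cifar"]],
        ),
        dcc.Dropdown(
            id="dropdown-simulation-model",
            options=[{"label": m, "value": m} for m in ["softmax", "cnn"]],
        ),
        dcc.Interval(id="interval-simulated-step", interval=1000),
        dcc.Interval(id="interval-log-update", interval=1000),
        dcc.Store(id="storage-simulated-run"),
        dcc.Store(id="run-log-storage"),
        html.Div(id="div-total-step-count"),
    ]
)
```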
#### File: dash-live-model-training/examples/cifar_deep_modified.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Modified Import
import numpy as np
from sklearn.model_selection import train_test_split
from skimage.transform import rescale
from skimage import color
from tfutils import write_data
from sklearn.preprocessing import OneHotEncoder
FLAGS = None
def deepnn(x):
"""deepnn builds the graph for a deep net for classifying digits.
Args:
x: an input tensor with the dimensions (N_examples, 784), where 784 is the
number of pixels in a standard MNIST image.
Returns:
A tuple (y, keep_prob). y is a tensor of shape (N_examples, 10), with values
equal to the logits of classifying the digit into one of 10 classes (the
digits 0-9). keep_prob is a scalar placeholder for the probability of
dropout.
"""
# Reshape to use within a convolutional neural net.
    # Last dimension is for "features" - there are three here, since images are
    # RGB -- it would be 1 for a grayscale image, 4 for RGBA, etc.
x_image = tf.reshape(x, [-1, 32, 32, 3])
# Convolutional layers 1 and 2 - maps 3-color image to 32 feature maps.
W_conv1 = weight_variable([3, 3, 3, 32]) # 3x3 filters
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
W_conv2 = weight_variable([3, 3, 32, 32])
b_conv2 = bias_variable([32])
h_conv2 = tf.nn.relu(conv2d(h_conv1, W_conv2) + b_conv2)
# Pooling layer - downsamples by 2X.
h_pool2 = max_pool_2x2(h_conv2)
# Dropout
h_pool2_drop = tf.nn.dropout(h_pool2, 0.75)
# Convolutional layers 3 and 4 - maps 32 feature maps to 64.
W_conv3 = weight_variable([3, 3, 32, 64]) # 3x3 filters
b_conv3 = bias_variable([64])
h_conv3 = tf.nn.relu(conv2d(h_pool2_drop, W_conv3) + b_conv3)
W_conv4 = weight_variable([3, 3, 64, 64]) # 3x3 filters
b_conv4 = bias_variable([64])
h_conv4 = tf.nn.relu(conv2d(h_conv3, W_conv4) + b_conv4)
# Second pooling layer.
h_pool4 = max_pool_2x2(h_conv4)
# Dropout
h_pool4_drop = tf.nn.dropout(h_pool4, 0.75)
# Fully connected layer 1 -- after 2 round of downsampling, our 32x32 image
# is down to 8x8x64 feature maps -- maps this to 512 features.
W_fc1 = weight_variable([8 * 8 * 64, 512])
b_fc1 = bias_variable([512])
h_pool4_flat = tf.reshape(h_pool4_drop, [-1, 8 * 8 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool4_flat, W_fc1) + b_fc1)
# Dropout - controls the complexity of the model, prevents co-adaptation of
# features.
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# Map the 512 features to 10 classes, one for each digit
W_fc2 = weight_variable([512, 10])
b_fc2 = bias_variable([10])
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
return y_conv, keep_prob
def conv2d(x, W):
"""conv2d returns a 2d convolution layer with full stride."""
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding="SAME")
def max_pool_2x2(x):
"""max_pool_2x2 downsamples a feature map by 2X."""
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
def weight_variable(shape):
"""weight_variable generates a weight variable of a given shape."""
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
"""bias_variable generates a bias variable of a given shape."""
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def main(_):
# Import data
print("Starting to generate CIFAR10 images.")
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
x_train = np.moveaxis(x_train, 1, 3) / 255.0 # Normalize values
x_train_vec = x_train.reshape(50000, -1)
x_test = np.moveaxis(x_test, 1, 3) / 255.0 # Normalize values
x_test_vec = x_test.reshape(10000, -1)
X_train, X_val, y_train, y_val = train_test_split(
x_train_vec, y_train, test_size=0.1, random_state=42
)
print("Finished generating CIFAR10 images.")
# Create the model
x = tf.placeholder(tf.float32, [None, 32 * 32 * 3])
# Define loss and optimizer
y_ = tf.placeholder(tf.float32, [None, 10])
# Build the graph for the deep net
y_conv, keep_prob = deepnn(x)
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=y_conv)
)
train_step = tf.train.AdamOptimizer(1e-4).minimize(
cross_entropy
) # RMS is used in keras example, Adam is better
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.Session() as sess:
        y_train = OneHotEncoder(sparse=False).fit_transform(y_train)
        y_val = OneHotEncoder(sparse=False).fit_transform(y_val)
        # One-hot encode the test labels as well so they match the y_ placeholder
        y_test = OneHotEncoder(sparse=False).fit_transform(y_test)
sess.run(tf.global_variables_initializer())
for i in range(20001):
start_train = i * 50 % y_train.shape[0]
end_train = start_train + 50
start_val = i * 50 % y_val.shape[0]
end_val = start_val + 50
batch = (X_train[start_train:end_train], y_train[start_train:end_train])
batch_val = (X_val[start_val:end_val], y_val[start_val:end_val])
feed_dict_train = {x: batch[0], y_: batch[1], keep_prob: 1.0}
feed_dict_val = {x: batch_val[0], y_: batch_val[1], keep_prob: 1.0}
# Writes data into run log csv file
write_data(
accuracy=accuracy,
cross_entropy=cross_entropy,
feed_dict_train=feed_dict_train,
feed_dict_val=feed_dict_val,
step=i,
)
if i % 100 == 0:
train_accuracy = accuracy.eval(
feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0}
)
print("step %d, training accuracy %g" % (i, train_accuracy))
train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
print(
"test accuracy %g"
% accuracy.eval(feed_dict={x: x_test_vec, y_: y_test, keep_prob: 1.0})
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_dir",
type=str,
default="/tmp/tensorflow/mnist/input_data",
help="Directory for storing input data",
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
```
#### File: dash-peaky-finders/peaky_finders/predictor.py
```python
from typing import List
import datetime as dt
from datetime import timedelta
import requests
import os
import pickle
from typing import Dict, Tuple
import geopandas as gpd
import pandas as pd
import pytz, datetime
from shapely import wkt
from timezonefinderL import TimezoneFinder
from peaky_finders.data_acquisition.train_model import (
LoadCollector,
GEO_COORDS,
CATEGORICAL_FEATURES,
MONTH_TO_SEASON,
)
from peaky_finders.training_pipeline import MODEL_OUTPUT_DIR, MODEL_INPUT_DIR
from peaky_finders.data_acquisition.train_model import GEO_COORDS
ISO_MAP_IDS = {
56669: "MISO",
14725: "PJM",
2775: "CAISO",
13434: "ISONE",
13501: "NYISO",
}
ISO_LIST = ["NYISO", "ISONE", "PJM", "MISO", "CAISO"]
PEAK_DATA_PATH = os.path.join(os.path.dirname(__file__), "historical_peaks")
tz_finder = TimezoneFinder()
def get_iso_map():
iso_df = pd.read_csv("iso_map_final.csv")
iso_df["geometry"] = iso_df["geometry"].apply(wkt.loads)
iso_gdf = gpd.GeoDataFrame(iso_df, crs="EPSG:4326", geometry="geometry")
return iso_gdf
class Predictor:
def __init__(self, iso_name: str, start: str, end: str) -> None:
self.start = start
self.end = end
self.iso_name = iso_name
self.load_collector: LoadCollector = None
def get_load(self, start: str, end: str):
self.load_collector = LoadCollector(self.iso_name, start, end)
def featurize(self):
self.load_collector.engineer_features()
def add_future(self, load: pd.Series) -> pd.Series:
future = pd.date_range(
start=load.index[-1], end=(load.index[-1] + timedelta(days=1)), freq="H"
).to_frame(name="load_MW")
tz_finder = TimezoneFinder()
lon = float(GEO_COORDS[self.iso_name]["lon"])
lat = float(GEO_COORDS[self.iso_name]["lat"])
tz_name = tz_finder.timezone_at(lng=lon, lat=lat)
future["load_MW"] = None
future.index = future.index.tz_convert(tz_name)
return future
def prepare_predictions(self):
self.get_load(self.start, self.end)
load = self.load_collector.load
self.load_collector.engineer_features()
model_input = self.load_collector.load.copy()
for feature in CATEGORICAL_FEATURES:
dummies = pd.get_dummies(
model_input[feature], prefix=feature, drop_first=True
)
model_input = model_input.drop(feature, axis=1)
model_input = pd.concat([model_input, dummies], axis=1)
return model_input, load
    def predict_load(self, model_input: pd.DataFrame) -> pd.Series:
model_path = os.path.join(
MODEL_OUTPUT_DIR, (f"xg_boost_{self.iso_name}_load_model.pkl")
)
xgb = pickle.load(open(model_path, "rb"))
if "holiday_True" not in model_input.columns:
model_input["holiday_True"] = 0
X = model_input.drop("load_MW", axis=1).astype(float).dropna()
weekday_cols = [f"weekday_{i + 1}" for i in range(0, 6)]
if len(set(weekday_cols) - set(X.columns)) > 0:
for col in list(set(weekday_cols) - set(X.columns)):
X[col] = 0
predictions = xgb.predict(X[xgb.get_booster().feature_names])
X["predicted_load"] = predictions
return X["predicted_load"]
    # NOTE: an unfinished second predict_load(self) stub originally followed here.
    # It shadowed the working predict_load(model_input) above and referenced an
    # undefined MODEL_INPUT_DATA mapping, which broke predict_all(); it is kept
    # below, commented out, for reference.
    # def predict_load(self):
    #     for iso in ISO_LIST:
    #         model_input_path = os.path.join(MODEL_INPUT_DIR, MODEL_INPUT_DATA[iso])
    #         model_path = os.path.join(
    #             MODEL_OUTPUT_DIR, (f"xg_boost_{self.iso_name}_load_model.pkl")
    #         )
def predict_all(iso_list: list, start: str, end: str) -> Dict[str, pd.DataFrame]:
historical_vs_predicted = {}
for iso in iso_list:
predictor = Predictor(iso, start, end)
model_input, historical_load = predictor.prepare_predictions()
predictions = predictor.predict_load(model_input)
comparison_df = pd.concat([model_input, predictions], axis=1)[
["load_MW", "predicted_load"]
]
historical_vs_predicted[iso] = comparison_df
return historical_vs_predicted
def get_peak_data(iso_list: list) -> Dict[str, pd.DataFrame]:
peak_data = {}
for iso in iso_list:
path = "https://raw.githubusercontent.com/kbaranko/peaky-finders/master/peaky_finders/historical_peaks"
iso_data = pd.read_csv(
f"{path}/{iso}_historical_peaks.csv", parse_dates=["timestamp"]
)
iso_data["timestamp"] = iso_data["timestamp"].apply(
lambda x: x.astimezone(pytz.utc)
)
tz_name = tz_finder.timezone_at(
lng=float(GEO_COORDS[iso]["lon"]), lat=float(GEO_COORDS[iso]["lat"])
)
iso_data.index = pd.DatetimeIndex(iso_data["timestamp"])
iso_data.index = iso_data.index.tz_convert(tz_name)
peak_data[iso] = iso_data
return peak_data
def get_temperature_forecast(iso: str) -> dict:
lon = GEO_COORDS[iso]["lon"]
lat = GEO_COORDS[iso]["lat"]
API_KEY = os.environ["DARKSKY_KEY"]
url = f"https://api.darksky.net/forecast/{API_KEY}/{lat},{lon}"
response = requests.get(url)
if response.status_code == 200:
print(response.status_code)
else:
raise ValueError(
f"Error getting data from DarkSky API: "
f"Response Code {response.status_code}"
)
info = response.json()
hourly_data = info["hourly"]["data"]
hourly_temp = {}
for info in hourly_data:
timestamp = datetime.datetime.fromtimestamp(info["time"])
tz = tz_finder.timezone_at(lng=float(lon), lat=float(lat))
timestamp = timestamp.astimezone(pytz.timezone(tz))
hourly_temp[timestamp] = info["temperature"]
return hourly_temp
def create_load_duration(peak_data: Dict[str, pd.DataFrame]) -> Dict[str, pd.Series]:
load_duration_curves = {}
for iso in ISO_LIST:
load_duration_curves[iso] = pd.Series(
peak_data[iso]["load_MW"].values
).sort_values(ascending=False)
return load_duration_curves
def get_forecasts(iso_list: List[str]):
predictions = {}
historical_load = {}
temperature = {}
for iso in iso_list:
path = f"https://raw.githubusercontent.com/kbaranko/peaky-finders/master/peaky_finders/forecasts/{iso}_forecasts.csv"
iso_data = pd.read_csv(path, parse_dates=["timestamp"])
iso_data["timestamp"] = iso_data["timestamp"].apply(
lambda x: x.astimezone(pytz.utc)
)
tz_name = tz_finder.timezone_at(
lng=float(GEO_COORDS[iso]["lon"]), lat=float(GEO_COORDS[iso]["lat"])
)
iso_data.index = pd.DatetimeIndex(iso_data["timestamp"])
iso_data.index = iso_data.index.tz_convert(tz_name)
historical_load[iso] = iso_data["load_MW"]
predictions[iso] = iso_data["predicted_load"]
temperature[iso] = iso_data["temperature"]
return predictions, historical_load, temperature
```
#### File: apps/dash-pileup-demo/app.py
```python
import os
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output
import dash_bio
import pandas as pd
import numpy as np
import math
import plotly.graph_objects as go
from layout_helper import run_standalone_app
text_style = {"color": "#506784", "font-family": "Open Sans"}
_COMPONENT_ID = "pileup-browser"
def description():
return "An interactive in-browser track viewer."
def azure_url(file):
return os.path.join(
"https://sampleappsdata.blob.core.windows.net/dash-pileup-demo/rna/", file
)
def header_colors():
return {
"bg_color": "#0F5BA7",
"font_color": "white",
}
def rna_differential(app):
basal_lactate = {
"url": azure_url("SRR1552454.fastq.gz.sampled.bam"),
"indexUrl": azure_url("SRR1552454.fastq.gz.sampled.bam.bai"),
}
luminal_lactate = {
"url": azure_url("SRR1552448.fastq.gz.sampled.bam"),
"indexUrl": azure_url("SRR1552448.fastq.gz.sampled.bam.bai"),
}
HOSTED_TRACKS = {
"range": {"contig": "chr1", "start": 54986297, "stop": 54991347},
"celltype": [
{"viz": "scale", "label": "Scale"},
{"viz": "location", "label": "Location"},
{
"viz": "genes",
"label": "genes",
"source": "bigBed",
"sourceOptions": {"url": azure_url("mm10.ncbiRefSeq.sorted.bb")},
},
{
"viz": "coverage",
"label": "Basal",
"source": "bam",
"sourceOptions": basal_lactate,
},
{
"viz": "pileup",
"vizOptions": {"viewAsPairs": True},
"label": "Basal",
"source": "bam",
"sourceOptions": basal_lactate,
},
{
"viz": "coverage",
"label": "Luminal",
"source": "bam",
"sourceOptions": luminal_lactate,
},
{
"viz": "pileup",
"label": "Luminal",
"source": "bam",
"sourceOptions": luminal_lactate,
},
],
}
return HOSTED_TRACKS
REFERENCE = {
"label": "mm10",
"url": "https://hgdownload.cse.ucsc.edu/goldenPath/mm10/bigZips/mm10.2bit",
}
DATAPATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "assets/data")
# Differentially expressed genes (identified in R, see assets/data/rna/README.md)
DE_dataframe = pd.read_csv(azure_url("DE_genes.csv"))
# filter for the cell type condition
DE_dataframe = DE_dataframe[
DE_dataframe["Comparison"] == "luminal__v__basal"
].reset_index()
# add SNP column
DE_dataframe["SNP"] = "NA"
# get min and max effect sizes
df_min = math.floor(min(DE_dataframe["log2FoldChange"]))
df_max = math.ceil(max(DE_dataframe["log2FoldChange"]))
def layout(app):
HOSTED_CASE_DICT = rna_differential(app)
return html.Div(
id="pileup-body",
className="app-body",
children=[
html.Div(
id="pileup-control-tabs",
className="control-tabs",
children=[
dcc.Tabs(
id="pileup-tabs",
value="data",
children=[
dcc.Tab(
label="Volcano plot",
value="data",
children=html.Div(
className="control-tab",
children=[
"Effect Size",
dcc.RangeSlider(
id="pileup-volcanoplot-input",
min=df_min,
max=df_max,
step=None,
marks={
i: {"label": str(i)}
for i in range(df_min, df_max + 1, 2)
},
value=[-1, 1],
),
html.Br(),
dcc.Graph(
id="pileup-dashbio-volcanoplot",
figure=dash_bio.VolcanoPlot(
dataframe=DE_dataframe,
margin=go.layout.Margin(l=0, r=0, b=0),
legend={
"orientation": "h",
"yanchor": "bottom",
"y": 1.02,
"bgcolor": "#f2f5fa",
},
effect_size="log2FoldChange",
effect_size_line=[-1, 1],
title="Differentially Expressed Genes",
genomewideline_value=-np.log10(0.05),
p="padj",
snp="SNP",
gene="Gene",
),
),
],
),
),
dcc.Tab(
label="About this tutorial",
value="description",
children=html.Div(
className="control-tab",
children=[
html.H4(
className="description",
children="""Visualizing RNA-seq data with pileup.js
and volcano plots""",
),
dcc.Markdown(
"""
In this example, we use the pileup.js and volcano plot components from dash-bio
to visualize two RNA-sequencing
(RNA-seq) samples from two conditions. RNA-seq allows us to learn how the expression
of genes changes between different samples of interest. Here, we are looking at
RNA-seq from two samples that are taken from two different mouse cell types.
We refer to these different cell types as basal and luminal cell types.
On the right, we use pileup.js to visualize aligned reads from RNA-seq samples.
On the left, we have a volcano plot, that visualizes the magnitude of change
in gene expression between the two samples. On the x-axis, the `Effect Size`
indicates the log2 fold change in expression
between the two conditions. On the y-axis, `-log10(p)` indicates the -log10(p-value)
for each gene. This p-value, along with the effect size,
can help determine whether each gene is significantly
differentially expressed between the conditions of interest.
To explore a gene, you can click on a gene in the volcano plot. After clicking on
a gene, the genomic region overlapping that gene will show up in the pileup.js
browser on the right. Now, you can investigate RNA-seq alignments at each
gene of interest. You may notice that genes with a negative effect size in the volcano
plot have more RNA-seq reads in the top sample (the basal cell type), while genes
with a positive effect size have more reads in the bottom sample
(the luminal cell type).
"""
),
],
),
),
dcc.Tab(
label="About pileup.js",
value="what-is",
children=html.Div(
className="control-tab",
children=[
html.H4(
className="what-is",
children="What is pileup.js?",
),
dcc.Markdown(
"""
The Dash pileup.js component is a high-performance genomics
data visualization component developed originally by the Hammer Lab
(https://github.com/hammerlab/pileup.js). pileup.js
supports visualization of genomic file formats, such as vcf,
bam, and bigbed files. pileup.js additionally allows flexible
interaction with non-standard data formats. Users can visualize
GA4GH JSON formatted alignments, features and variants. Users can
also connect with and visualize data stored in GA4GH formatted data
stores.
"""
),
],
),
),
],
)
],
),
dcc.Loading(
parent_className="dashbio-loading",
id="pileup-output",
children=html.Div(
[
dash_bio.Pileup(
id=_COMPONENT_ID,
range=HOSTED_CASE_DICT["range"],
reference=REFERENCE,
tracks=HOSTED_CASE_DICT["celltype"],
)
]
),
),
],
)
def callbacks(_app):
HOSTED_CASE_DICT = rna_differential(_app)
@_app.callback(
Output("pileup-dashbio-volcanoplot", "figure"),
[Input("pileup-volcanoplot-input", "value")],
)
def update_volcano(effects):
return dash_bio.VolcanoPlot(
dataframe=DE_dataframe,
margin=go.layout.Margin(l=0, r=0, b=0),
legend={"orientation": "h", "yanchor": "bottom", "y": 1.02, "x": 0.0,},
effect_size="log2FoldChange",
effect_size_line=effects,
title="Differentially Expressed Genes",
genomewideline_value=-np.log10(0.05),
p="padj",
snp="SNP",
gene="Gene",
)
@_app.callback(
Output(_COMPONENT_ID, "range"), Input("pileup-dashbio-volcanoplot", "clickData")
)
def update_range(point):
if point is None:
range = HOSTED_CASE_DICT["range"]
else:
# get genomic location of selected genes and goto
pointText = point["points"][0]["text"]
gene = pointText.split("GENE: ")[-1]
row = DE_dataframe[DE_dataframe["Gene"] == gene].iloc[0]
range = {"contig": row["chr"], "start": row["start"], "stop": row["end"]}
return range
app = run_standalone_app(layout, callbacks, header_colors, __file__)
server = app.server
if __name__ == "__main__":
app.run_server(debug=True, port=8050)
```
#### File: apps/dash-pivottable/app.py
```python
import dash
from dash.dependencies import Input, Output
import dash_html_components as html
import dash_pivottable
from data import data
def Header(name, app):
img_style = {"float": "right", "height": 40, "margin-right": 10}
dash_logo = html.Img(src=app.get_asset_url("dash.png"), style=img_style)
ghub_logo = html.Img(src=app.get_asset_url("github.png"), style=img_style)
return html.Div(
[
html.H1(name, style={"margin": 10, "display": "inline"}),
html.A(dash_logo, href="https://plotly.com/dash/"),
html.A(ghub_logo, href="https://github.com/plotly/dash-pivottable"),
html.A(
html.Button(
"Enterprise Demo",
style={
"float": "right",
"margin-right": "10px",
"margin-top": "5px",
"padding": "5px 10px",
"font-size": "15px",
},
),
href="https://plotly.com/get-demo/",
),
html.Hr(),
]
)
app = dash.Dash(__name__)
app.title = "Dash Pivottable"
server = app.server
app.layout = html.Div(
[
Header("Dash Pivottable", app),
dash_pivottable.PivotTable(
id="table",
data=data,
cols=["Day of Week"],
colOrder="key_a_to_z",
rows=["Party Size"],
rowOrder="key_a_to_z",
rendererName="Grouped Column Chart",
aggregatorName="Average",
vals=["Total Bill"],
valueFilter={"Day of Week": {"Thursday": False}},
),
html.Div(id="output"),
]
)
@app.callback(
Output("output", "children"),
[
Input("table", "cols"),
Input("table", "rows"),
Input("table", "rowOrder"),
Input("table", "colOrder"),
Input("table", "aggregatorName"),
Input("table", "rendererName"),
],
)
def display_props(cols, rows, row_order, col_order, aggregator, renderer):
return [
html.P(str(cols), id="columns"),
html.P(str(rows), id="rows"),
html.P(str(row_order), id="row_order"),
html.P(str(col_order), id="col_order"),
html.P(str(aggregator), id="aggregator"),
html.P(str(renderer), id="renderer"),
]
if __name__ == "__main__":
app.run_server(debug=True)
```
#### File: app/ui/tab_comparison_controls.py
```python
import dash_core_components as dcc
import dash_html_components as html
from config import strings
def make_port_comparison_controls(
port_1_arr: list,
port_1_val: str,
port_2_arr: list,
port_2_val: str,
vessel_types_arr: list,
vessel_type_val: str,
) -> html.Div:
"""
Returns dropdown controls options for the Compare tab.
:param port_1_arr: list, possible values for the first port
:param port_1_val: str, current value for the first port
:param port_2_arr: list, possible values for the second port
:param port_2_val: str, current value for the second port
:param vessel_types_arr: list, possible values for the vessel types
:param vessel_type_val: str, current value for the vessel type
:return: HTML div
"""
return html.Div(
className="tab-port-map-controls",
children=[
html.Div(
className="tab-port-map-single-control-container area-a",
children=[
html.Label(
className="control-label", children=[strings.LABEL_PORT]
),
dcc.Dropdown(
id="port-compare-port-1-dpd",
clearable=False,
options=[{"label": port, "value": port} for port in port_1_arr],
value=port_1_val,
),
],
),
html.Div(className="tab-port-map-single-control-separator area-b"),
html.Div(
className="tab-port-map-single-control-container area-c",
children=[
html.Label(
className="control-label", children=[strings.LABEL_VESSEL]
),
dcc.Dropdown(
id="port-compare-vessel-type-dpd",
clearable=False,
options=[
{"label": vessel, "value": vessel}
for vessel in vessel_types_arr
],
value=vessel_type_val,
),
],
),
html.Div(className="tab-port-map-single-control-separator area-d"),
html.Div(
className="tab-port-map-single-control-container area-e",
children=[
html.Label(
className="control-label", children=[strings.LABEL_PORT_COMPARE]
),
dcc.Dropdown(
id="port-compare-port-2-dpd",
clearable=False,
options=[{"label": port, "value": port} for port in port_2_arr],
value=port_2_val,
),
],
),
],
)
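# Illustrative usage sketch (not part of the original module): the port and
# vessel options below are hypothetical placeholders; in the app they are
# derived from the loaded dataset before the layout is built.
def _example_comparison_controls():
    return make_port_comparison_controls(
        port_1_arr=["Port A", "Port B"],
        port_1_val="Port A",
        port_2_arr=["Port A", "Port B"],
        port_2_val="Port B",
        vessel_types_arr=["Cargo", "Tanker"],
        vessel_type_val="Cargo",
    )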
```
#### File: app/ui/tab_map_controls.py
```python
import dash_core_components as dcc
import dash_html_components as html
from config import strings
def make_tab_port_map_controls(
port_arr: list,
port_val: str,
vessel_types_arr: list,
vessel_type_val: str,
year_arr: list,
year_val: int,
month_arr: list,
month_val: int,
) -> html.Div:
"""
Returns a HTML div of user controls found on top of the map tab.
:param port_arr: list, all possible ports
:param port_val: str, current port value
:param vessel_types_arr: list, all possible vessel types
:param vessel_type_val: str, current vessel type value
:param year_arr: list, all possible years
:param year_val: str, current year value
:param month_arr: list, all possible months
:param month_val: str, current month value
:return: HTML div
"""
return html.Div(
className="tab-port-map-controls",
children=[
html.Div(
className="tab-port-map-single-control-container area-a",
children=[
html.Label(
className="control-label", children=[strings.LABEL_PORT]
),
dcc.Dropdown(
id="port-map-dropdown-port",
clearable=False,
options=[{"label": port, "value": port} for port in port_arr],
value=port_val,
),
],
),
html.Div(className="tab-port-map-single-control-separator area-b"),
html.Div(
className="tab-port-map-single-control-container area-c",
children=[
html.Label(
className="control-label", children=[strings.LABEL_VESSEL]
),
dcc.Dropdown(
id="port-map-dropdown-vessel-type",
clearable=False,
options=[
{"label": vessel_type, "value": vessel_type}
for vessel_type in vessel_types_arr
],
value=vessel_type_val,
),
],
),
html.Div(className="tab-port-map-single-control-separator area-d"),
html.Div(
className="tab-port-map-single-control-container date-grid area-e",
children=[
html.Div(
className="tab-port-map-single-control-container-date",
children=[
html.Label(
className="control-label", children=[strings.LABEL_YEAR]
),
dcc.Dropdown(
id="port-map-dropdown-year",
clearable=False,
options=[
{"label": year, "value": year} for year in year_arr
],
value=year_val,
),
],
),
html.Div(
className="tab-port-map-single-control-separator smaller-line"
),
html.Div(
className="tab-port-map-single-control-container-date",
children=[
html.Label(
className="control-label",
children=[strings.LABEL_MONTH],
),
dcc.Dropdown(
id="port-map-dropdown-month",
clearable=False,
options=[
{"label": month, "value": month}
for month in month_arr
],
value=month_val,
),
],
),
],
),
],
)
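# Illustrative usage sketch (not part of the original module), mirroring the
# comparison-tab helper: all option lists and defaults are hypothetical.
def _example_map_controls():
    return make_tab_port_map_controls(
        port_arr=["Port A", "Port B"],
        port_val="Port A",
        vessel_types_arr=["Cargo", "Tanker"],
        vessel_type_val="Cargo",
        year_arr=[2019, 2020],
        year_val=2020,
        month_arr=list(range(1, 13)),
        month_val=1,
    )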
```
#### File: apps/dash-salesforce-crm/index.py
```python
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from app import sf_manager, app
from panels import opportunities, cases, leads
server = app.server
app.layout = html.Div(
[
html.Div(
className="row header",
children=[
html.Button(id="menu", children=dcc.Markdown("≡")),
html.Span(
className="app-title",
children=[
dcc.Markdown("**CRM App**"),
html.Span(
id="subtitle",
children=dcc.Markdown("  using Salesforce API"),
style={"font-size": "1.8rem", "margin-top": "15px"},
),
],
),
html.Img(src=app.get_asset_url("logo.png")),
html.A(
id="learn_more",
children=html.Button("Learn More"),
href="https://plot.ly/dash/",
),
],
),
html.Div(
id="tabs",
className="row tabs",
children=[
dcc.Link("Opportunities", href="/"),
dcc.Link("Leads", href="/"),
dcc.Link("Cases", href="/"),
],
),
html.Div(
id="mobile_tabs",
className="row tabs",
style={"display": "none"},
children=[
dcc.Link("Opportunities", href="/"),
dcc.Link("Leads", href="/"),
dcc.Link("Cases", href="/"),
],
),
dcc.Store( # opportunities df
id="opportunities_df",
data=sf_manager.get_opportunities().to_json(orient="split"),
),
dcc.Store( # leads df
id="leads_df", data=sf_manager.get_leads().to_json(orient="split")
),
dcc.Store(
id="cases_df", data=sf_manager.get_cases().to_json(orient="split")
), # cases df
dcc.Location(id="url", refresh=False),
html.Div(id="tab_content"),
html.Link(
href="https://use.fontawesome.com/releases/v5.2.0/css/all.css",
rel="stylesheet",
),
html.Link(
href="https://fonts.googleapis.com/css?family=Dosis", rel="stylesheet"
),
html.Link(
href="https://fonts.googleapis.com/css?family=Open+Sans", rel="stylesheet"
),
html.Link(
href="https://fonts.googleapis.com/css?family=Ubuntu", rel="stylesheet"
),
],
className="row",
style={"margin": "0%"},
)
# Update the index
@app.callback(
[
Output("tab_content", "children"),
Output("tabs", "children"),
Output("mobile_tabs", "children"),
],
[Input("url", "pathname")],
)
def display_page(pathname):
tabs = [
dcc.Link("Opportunities", href="/dash-salesforce-crm/opportunities"),
dcc.Link("Leads", href="/dash-salesforce-crm/leads"),
dcc.Link("Cases", href="/dash-salesforce-crm/cases"),
]
if pathname == "/dash-salesforce-crm/opportunities":
tabs[0] = dcc.Link(
dcc.Markdown("**■ Opportunities**"),
href="/dash-salesforce-crm/opportunities",
)
return opportunities.layout, tabs, tabs
elif pathname == "/dash-salesforce-crm/cases":
tabs[2] = dcc.Link(
dcc.Markdown("**■ Cases**"), href="/dash-salesforce-crm/cases"
)
return cases.layout, tabs, tabs
tabs[1] = dcc.Link(
dcc.Markdown("**■ Leads**"), href="/dash-salesforce-crm/leads"
)
return leads.layout, tabs, tabs
@app.callback(
Output("mobile_tabs", "style"),
[Input("menu", "n_clicks")],
[State("mobile_tabs", "style")],
)
def show_menu(n_clicks, tabs_style):
if n_clicks:
if tabs_style["display"] == "none":
tabs_style["display"] = "flex"
else:
tabs_style["display"] = "none"
return tabs_style
if __name__ == "__main__":
app.run_server(debug=True)
```
#### File: apps/dash-soccer-analytics/fig_generator.py
```python
import plotly.io as pio
# This script takes the motion_graph output json file and converts it back into a figure which can be
# displayed as an animated graph using the main app.py Dash app
def fig_from_json(filename):
with open(filename, "r") as f:
fig = pio.from_json(f.read())
return fig
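# Illustrative usage sketch (not part of the original script): load a figure
# previously exported as JSON and display it. "motion_graph.json" is a
# hypothetical filename for the exported output mentioned above.
def _example_show_figure(filename="motion_graph.json"):
    fig = fig_from_json(filename)
    fig.show()
    return fig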
```
#### File: apps/dash-spatial-clustering/helpers.py
```python
import json
import os
import shutil
import time
import pandas as pd
import numpy as np
import pathlib
import geopandas as gpd
import pysal as ps
from sklearn import cluster
from sklearn.preprocessing import scale
# Data reading & Processing
app_path = pathlib.Path(__file__).parent.resolve()
data_path = pathlib.Path(__file__).parent.joinpath("data")
geo_json_path = data_path.joinpath("Zipcodes.geojson")
austin_listings = pd.read_csv(
"https://raw.githubusercontent.com/plotly/datasets/master/dash-sample-apps/dash-spatial-clustering/data/listings.csv",
low_memory=False,
)
# Fix zipcode outlier, modify in place
zip_outlier = austin_listings[austin_listings["zipcode"] == "TX 78702"].index
austin_listings.loc[zip_outlier, "zipcode"] = "78702"
austin_listings = austin_listings.dropna(axis=0, subset=["zipcode"])
review_columns = [c for c in austin_listings.columns if "review_" in c]
# Geojson loading
with open(geo_json_path) as response:
zc_link = json.load(response)
# Add id for choropleth layer
for feature in zc_link["features"]:
feature["id"] = feature["properties"]["zipcode"]
listing_zipcode = austin_listings["zipcode"].unique()
def apply_clustering():
"""
# Apply KMeans clustering to group zipcodes into categories based on type of houses listed(i.e. property type)
:return: Dataframe.
db: scaled proportions of house types by zipcode, use for plotting Choropleth map layer.
barh_df : scaled proportion of house type grouped by cluster, use for prop type chart and review chart.
"""
variables = ["bedrooms", "bathrooms", "beds"]
aves = austin_listings.groupby("zipcode")[variables].mean()
review_aves = austin_listings.groupby("zipcode")[review_columns].mean()
types = pd.get_dummies(austin_listings["property_type"])
prop_types = types.join(austin_listings["zipcode"]).groupby("zipcode").sum()
prop_types_pct = (prop_types * 100.0).div(prop_types.sum(axis=1), axis=0)
aves_props = aves.join(prop_types_pct)
    # Standardize the dataset: center each column to its mean and scale to unit variance.
db = pd.DataFrame(
scale(aves_props), index=aves_props.index, columns=aves_props.columns
).rename(lambda x: str(x))
# Apply clustering on scaled df
km5 = cluster.KMeans(n_clusters=5)
km5cls = km5.fit(db.reset_index().values)
# print(len(km5cls.labels_))
db["cl"] = km5cls.labels_
# sort by labels since every time cluster is running, label 0-4 is randomly assigned
db["count"] = db.groupby("cl")["cl"].transform("count")
db.sort_values("count", inplace=True, ascending=True)
barh_df = prop_types_pct.assign(cl=km5cls.labels_).groupby("cl").mean()
# Join avg review columns for updating review plot
db = db.join(review_aves)
grouped = db.groupby("cl")[review_columns].mean()
barh_df = barh_df.join(grouped)
return db.reset_index(), barh_df
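# Illustrative usage sketch (not part of the original module): the Dash app
# imports these helpers, but on their own they could be exercised like this.
def _example_clustering_summary():
    db, barh_df = apply_clustering()
    # `cl` is the KMeans cluster label per zipcode; barh_df holds the mean
    # property-type mix and review scores per cluster.
    return db.groupby("cl")["zipcode"].count(), barh_df.head()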
def rating_clustering(threshold):
start = time.time()
    # Explore boundaries/areas where customers have similar ratings. Unlike
    # clustering with a predefined number of output regions, max-p takes a
    # target variable (number of reviews) and applies a minimum threshold
    # (e.g. 5% per region) to it.
# Bring review columns at zipcode level
rt_av = austin_listings.groupby("zipcode")[review_columns].mean().dropna()
# Regionalization requires building of spatial weights
zc = gpd.read_file(geo_json_path)
zrt = zc[["geometry", "zipcode"]].join(rt_av, on="zipcode").dropna()
zrt.to_file("tmp")
w = ps.queen_from_shapefile("tmp/tmp.shp", idVariable="zipcode")
# Remove temp tmp/* we created for spatial weights
if os.path.isdir(os.path.join(app_path, "tmp")):
print("removing tmp folder")
shutil.rmtree(os.path.join(app_path, "tmp"))
# Impose that every resulting region has at least 5% of the total number of reviews
n_review = (
austin_listings.groupby("zipcode")
.sum()["number_of_reviews"]
.rename(lambda x: str(int(x)))
.reindex(zrt["zipcode"])
)
thr = np.round(int(threshold) / 100 * n_review.sum())
# Set the seed for reproducibility
np.random.seed(1234)
z = zrt.drop(["geometry", "zipcode"], axis=1).values
# Create max-p algorithm, note that this API is upgraded in pysal>1.11.1
maxp = ps.region.Maxp(w, z, thr, n_review.values[:, None], initial=100)
maxp.cinference(nperm=99)
# p value compared with randomly assigned region
p_value = maxp.cpvalue
print("p_value:", p_value)
lbls = pd.Series(maxp.area2region).reindex(zrt["zipcode"])
regionalization_df = (
pd.DataFrame(lbls).reset_index().rename(columns={"zipcode": "zipcode", 0: "cl"})
)
end = time.time()
# The larger threshold, the longer time it takes for computing
print(
"Computing threshold {}%".format(threshold),
"time cost for clustering: ",
end - start,
)
types = pd.get_dummies(austin_listings["property_type"])
prop_types = types.join(austin_listings["zipcode"]).groupby("zipcode").sum()
merged = pd.merge(
prop_types.reset_index(), regionalization_df, on="zipcode", how="inner"
)
d_merged = merged.drop(["zipcode", "cl"], axis=1)
prop_types_pct = (d_merged * 100.0).div(d_merged.sum(axis=1), axis=0)
pct_d = (
prop_types_pct.assign(cl=merged["cl"], zipcode=merged["zipcode"])
.groupby("cl")
.mean()
)
zrt = zrt[review_columns].groupby(lbls.values).mean()
joined_prop = pct_d.join(zrt)
return regionalization_df, p_value, joined_prop
# #
# rating = rating_clustering(5)
```
#### File: apps/dash-stitching/registration.py
```python
import numpy as np
from skimage import io, measure, feature
from scipy import ndimage
def autocrop(img):
"""
Remove zero-valued rectangles at the border of the image.
Parameters
----------
img: ndarray
Image to be cropped
"""
slices = ndimage.find_objects(img > 0)[0]
return img[slices]
def _blending_mask(shape):
mask = np.zeros(shape, dtype=np.int)
mask[1:-1, 1:-1] = 1
return ndimage.distance_transform_cdt(mask) + 1
def register_tiles(
imgs,
n_rows,
n_cols,
overlap_global=None,
overlap_local=None,
pad=None,
blending=True,
):
"""
Stitch together overlapping tiles of a mosaic, using Fourier-based
registration to estimate the shifts between neighboring tiles.
Parameters
----------
    imgs: ndarray
        Array of tiles, of shape (n_rows, n_cols, l_r, l_c), with (l_r, l_c)
        the shape of individual tiles.
n_rows: int
number of rows of the mosaic.
n_cols : int
number of columns of the mosaic.
overlap_global : float
Fraction of overlap between tiles.
overlap_local : dictionary
Local overlaps between pairs of tiles. overlap_local[(i, j)] is a pair
of (x, y) shifts giving the 2D shift vector between tiles i and j.
Indices (i, j) are the raveled indices of the tile numbers.
pad : int
Value of the padding used at the border of the stitched image. An
autocrop is performed at the end to remove the unnecessary padding.
Notes
-----
Fourier-based registration is used in this function
(skimage.feature.register_translation).
"""
if pad is None:
pad = 200
l_r, l_c = imgs.shape[2:4]
if overlap_global is None:
overlap_global = 0.15
overlap_value = int(float(overlap_global) * l_r)
imgs = imgs.astype(np.float)
if blending:
blending_mask = _blending_mask((l_r, l_c))
else:
blending_mask = np.ones((l_r, l_c))
if imgs.ndim == 4:
canvas = np.zeros(
(2 * pad + n_rows * l_r, 2 * pad + n_cols * l_c), dtype=imgs.dtype
)
else:
canvas = np.zeros(
(2 * pad + n_rows * l_r, 2 * pad + n_cols * l_c, 3), dtype=imgs.dtype
)
blending_mask = np.dstack((blending_mask,) * 3)
weights = np.zeros_like(canvas)
init_r, init_c = pad, pad
weighted_img = imgs[0, 0] * blending_mask
canvas[init_r : init_r + l_r, init_c : init_c + l_c] = weighted_img
weights[init_r : init_r + l_r, init_c : init_c + l_c] = blending_mask
shifts = np.empty((n_rows, n_cols, 2), dtype=np.int)
shifts[0, 0] = init_r, init_c
for i_rows in range(n_rows):
# Shifts between rows
if i_rows >= 1:
index_target = np.ravel_multi_index((i_rows, 0), (n_rows, n_cols))
index_orig = index_target - n_cols
try:
overlap = overlap_local[(index_orig, index_target)]
except (KeyError, TypeError):
overlap = np.array([overlap_value, 0])
init_r, init_c = shifts[i_rows - 1, 0]
init_r += l_r
shift_vert = feature.register_translation(
imgs[i_rows - 1, 0, -overlap[0] :, : (l_c - overlap[1])],
imgs[i_rows, 0, : overlap[0], -(l_c - overlap[1]) :],
)[0]
init_r += int(shift_vert[0]) - overlap[0]
init_c += int(shift_vert[1]) - overlap[1]
shifts[i_rows, 0] = init_r, init_c
# Fill canvas and weights
weighted_img = imgs[i_rows, 0] * blending_mask
canvas[init_r : init_r + l_r, init_c : init_c + l_c] += weighted_img
weights[init_r : init_r + l_r, init_c : init_c + l_c] += blending_mask
# Shifts between columns
for j_cols in range(n_cols - 1):
index_orig = np.ravel_multi_index((i_rows, j_cols), (n_rows, n_cols))
index_target = index_orig + 1
try:
overlap = overlap_local[(index_orig, index_target)]
except (KeyError, TypeError):
overlap = np.array([0, overlap_value])
init_c += l_c
if overlap[0] < 0:
print("up")
row_start_1 = -(l_r + overlap[0])
row_end_1 = None
row_start_2 = None
row_end_2 = l_r + overlap[0]
else:
print("down")
row_start_1 = None
row_end_1 = l_r - overlap[0]
row_start_2 = -(l_r - overlap[0])
row_end_2 = None
shift_horiz = feature.register_translation(
imgs[i_rows, j_cols, row_start_1:row_end_1, -overlap[1] :],
imgs[i_rows, j_cols + 1, row_start_2:row_end_2, : overlap[1]],
)[0]
init_r += int(shift_horiz[0]) - (overlap[0])
init_c += int(shift_horiz[1]) - overlap[1]
shifts[i_rows, j_cols + 1] = init_r, init_c
# Fill canvas and weights
weighted_img = imgs[i_rows, j_cols + 1] * blending_mask
canvas[init_r : init_r + l_r, init_c : init_c + l_c] += weighted_img
weights[init_r : init_r + l_r, init_c : init_c + l_c] += blending_mask
canvas /= weights + 1.0e-5
return autocrop(np.rint(canvas).astype(np.uint8))
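# Illustrative usage sketch (not part of the original module): stitch a 2x2
# mosaic of grayscale tiles using the default 15% overlap estimate. `tiles` is
# a hypothetical array of shape (2, 2, tile_h, tile_w).
def _example_stitch(tiles):
    return register_tiles(tiles, n_rows=2, n_cols=2, overlap_global=0.15)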
```
#### File: apps/dash-svm/app.py
```python
import time
import importlib
import dash
import dash_core_components as dcc
import dash_html_components as html
import numpy as np
from dash.dependencies import Input, Output, State
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn import datasets
from sklearn.svm import SVC
import utils.dash_reusable_components as drc
import utils.figures as figs
app = dash.Dash(
__name__,
meta_tags=[
{"name": "viewport", "content": "width=device-width, initial-scale=1.0"}
],
)
app.title = "Support Vector Machine"
server = app.server
def generate_data(n_samples, dataset, noise):
if dataset == "moons":
return datasets.make_moons(n_samples=n_samples, noise=noise, random_state=0)
elif dataset == "circles":
return datasets.make_circles(
n_samples=n_samples, noise=noise, factor=0.5, random_state=1
)
elif dataset == "linear":
X, y = datasets.make_classification(
n_samples=n_samples,
n_features=2,
n_redundant=0,
n_informative=2,
random_state=2,
n_clusters_per_class=1,
)
rng = np.random.RandomState(2)
X += noise * rng.uniform(size=X.shape)
linearly_separable = (X, y)
return linearly_separable
else:
raise ValueError(
"Data type incorrectly specified. Please choose an existing dataset."
)
app.layout = html.Div(
children=[
# .container class is fixed, .container.scalable is scalable
html.Div(
className="banner",
children=[
# Change App Name here
html.Div(
className="container scalable",
children=[
# Change App Name here
html.H2(
id="banner-title",
children=[
html.A(
"Support Vector Machine (SVM) Explorer",
href="https://github.com/plotly/dash-svm",
style={
"text-decoration": "none",
"color": "inherit",
},
)
],
),
html.A(
id="banner-logo",
children=[
html.Img(src=app.get_asset_url("dash-logo-new.png"))
],
href="https://plot.ly/products/dash/",
),
],
)
],
),
html.Div(
id="body",
className="container scalable",
children=[
html.Div(
id="app-container",
# className="row",
children=[
html.Div(
# className="three columns",
id="left-column",
children=[
drc.Card(
id="first-card",
children=[
drc.NamedDropdown(
name="Select Dataset",
id="dropdown-select-dataset",
options=[
{"label": "Moons", "value": "moons"},
{
"label": "Linearly Separable",
"value": "linear",
},
{
"label": "Circles",
"value": "circles",
},
],
clearable=False,
searchable=False,
value="moons",
),
drc.NamedSlider(
name="Sample Size",
id="slider-dataset-sample-size",
min=100,
max=500,
step=100,
marks={
str(i): str(i)
for i in [100, 200, 300, 400, 500]
},
value=300,
),
drc.NamedSlider(
name="Noise Level",
id="slider-dataset-noise-level",
min=0,
max=1,
marks={
i / 10: str(i / 10)
for i in range(0, 11, 2)
},
step=0.1,
value=0.2,
),
],
),
drc.Card(
id="button-card",
children=[
drc.NamedSlider(
name="Threshold",
id="slider-threshold",
min=0,
max=1,
value=0.5,
step=0.01,
),
html.Button(
"Reset Threshold",
id="button-zero-threshold",
),
],
),
drc.Card(
id="last-card",
children=[
drc.NamedDropdown(
name="Kernel",
id="dropdown-svm-parameter-kernel",
options=[
{
"label": "Radial basis function (RBF)",
"value": "rbf",
},
{"label": "Linear", "value": "linear"},
{
"label": "Polynomial",
"value": "poly",
},
{
"label": "Sigmoid",
"value": "sigmoid",
},
],
value="rbf",
clearable=False,
searchable=False,
),
drc.NamedSlider(
name="Cost (C)",
id="slider-svm-parameter-C-power",
min=-2,
max=4,
value=0,
marks={
i: "{}".format(10 ** i)
for i in range(-2, 5)
},
),
drc.FormattedSlider(
id="slider-svm-parameter-C-coef",
min=1,
max=9,
value=1,
),
drc.NamedSlider(
name="Degree",
id="slider-svm-parameter-degree",
min=2,
max=10,
value=3,
step=1,
marks={
str(i): str(i) for i in range(2, 11, 2)
},
),
drc.NamedSlider(
name="Gamma",
id="slider-svm-parameter-gamma-power",
min=-5,
max=0,
value=-1,
marks={
i: "{}".format(10 ** i)
for i in range(-5, 1)
},
),
drc.FormattedSlider(
id="slider-svm-parameter-gamma-coef",
min=1,
max=9,
value=5,
),
html.Div(
id="shrinking-container",
children=[
html.P(children="Shrinking"),
dcc.RadioItems(
id="radio-svm-parameter-shrinking",
labelStyle={
"margin-right": "7px",
"display": "inline-block",
},
options=[
{
"label": " Enabled",
"value": "True",
},
{
"label": " Disabled",
"value": "False",
},
],
value="True",
),
],
),
],
),
],
),
html.Div(
id="div-graphs",
children=dcc.Graph(
id="graph-sklearn-svm",
figure=dict(
layout=dict(
plot_bgcolor="#282b38", paper_bgcolor="#282b38"
)
),
),
),
],
)
],
),
]
)
@app.callback(
Output("slider-svm-parameter-gamma-coef", "marks"),
[Input("slider-svm-parameter-gamma-power", "value")],
)
def update_slider_svm_parameter_gamma_coef(power):
scale = 10 ** power
return {i: str(round(i * scale, 8)) for i in range(1, 10, 2)}
@app.callback(
Output("slider-svm-parameter-C-coef", "marks"),
[Input("slider-svm-parameter-C-power", "value")],
)
def update_slider_svm_parameter_C_coef(power):
scale = 10 ** power
return {i: str(round(i * scale, 8)) for i in range(1, 10, 2)}
@app.callback(
Output("slider-threshold", "value"),
[Input("button-zero-threshold", "n_clicks")],
[State("graph-sklearn-svm", "figure")],
)
def reset_threshold_center(n_clicks, figure):
if n_clicks:
Z = np.array(figure["data"][0]["z"])
value = -Z.min() / (Z.max() - Z.min())
else:
value = 0.4959986285375595
return value
# Disable Sliders if kernel not in the given list
@app.callback(
Output("slider-svm-parameter-degree", "disabled"),
[Input("dropdown-svm-parameter-kernel", "value")],
)
def disable_slider_param_degree(kernel):
return kernel != "poly"
@app.callback(
Output("slider-svm-parameter-gamma-coef", "disabled"),
[Input("dropdown-svm-parameter-kernel", "value")],
)
def disable_slider_param_gamma_coef(kernel):
return kernel not in ["rbf", "poly", "sigmoid"]
@app.callback(
Output("slider-svm-parameter-gamma-power", "disabled"),
[Input("dropdown-svm-parameter-kernel", "value")],
)
def disable_slider_param_gamma_power(kernel):
return kernel not in ["rbf", "poly", "sigmoid"]
@app.callback(
Output("div-graphs", "children"),
[
Input("dropdown-svm-parameter-kernel", "value"),
Input("slider-svm-parameter-degree", "value"),
Input("slider-svm-parameter-C-coef", "value"),
Input("slider-svm-parameter-C-power", "value"),
Input("slider-svm-parameter-gamma-coef", "value"),
Input("slider-svm-parameter-gamma-power", "value"),
Input("dropdown-select-dataset", "value"),
Input("slider-dataset-noise-level", "value"),
Input("radio-svm-parameter-shrinking", "value"),
Input("slider-threshold", "value"),
Input("slider-dataset-sample-size", "value"),
],
)
def update_svm_graph(
kernel,
degree,
C_coef,
C_power,
gamma_coef,
gamma_power,
dataset,
noise,
shrinking,
threshold,
sample_size,
):
t_start = time.time()
h = 0.3 # step size in the mesh
# Data Pre-processing
X, y = generate_data(n_samples=sample_size, dataset=dataset, noise=noise)
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.4, random_state=42
)
x_min = X[:, 0].min() - 0.5
x_max = X[:, 0].max() + 0.5
y_min = X[:, 1].min() - 0.5
y_max = X[:, 1].max() + 0.5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
C = C_coef * 10 ** C_power
gamma = gamma_coef * 10 ** gamma_power
if shrinking == "True":
flag = True
else:
flag = False
# Train SVM
clf = SVC(C=C, kernel=kernel, degree=degree, gamma=gamma, shrinking=flag)
clf.fit(X_train, y_train)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
prediction_figure = figs.serve_prediction_plot(
model=clf,
X_train=X_train,
X_test=X_test,
y_train=y_train,
y_test=y_test,
Z=Z,
xx=xx,
yy=yy,
mesh_step=h,
threshold=threshold,
)
roc_figure = figs.serve_roc_curve(model=clf, X_test=X_test, y_test=y_test)
confusion_figure = figs.serve_pie_confusion_matrix(
model=clf, X_test=X_test, y_test=y_test, Z=Z, threshold=threshold
)
return [
html.Div(
id="svm-graph-container",
children=dcc.Loading(
className="graph-wrapper",
children=dcc.Graph(id="graph-sklearn-svm", figure=prediction_figure),
style={"display": "none"},
),
),
html.Div(
id="graphs-container",
children=[
dcc.Loading(
className="graph-wrapper",
children=dcc.Graph(id="graph-line-roc-curve", figure=roc_figure),
),
dcc.Loading(
className="graph-wrapper",
children=dcc.Graph(
id="graph-pie-confusion-matrix", figure=confusion_figure
),
),
],
),
]
# Running the server
if __name__ == "__main__":
app.run_server(debug=True)
```
#### File: apps/dash-synapse-demo/utils.py
```python
import dash
import dash_html_components as html
import dash_core_components as dcc
import dash_bootstrap_components as dbc
def OptionMenu(values, label, **kwargs):
options = [{"label": s.replace("_", " ").capitalize(), "value": s} for s in values]
kwargs["value"] = kwargs.get("value", values[0])
if len(options) <= 4:
component = dbc.RadioItems
kwargs["inline"] = True
else:
component = dbc.Select
return dbc.FormGroup([dbc.Label(label), component(options=options, **kwargs)])
def CustomRangeSlider(values, label, **kwargs):
values = sorted(values)
marks = {i: f"{i//1000}k" for i in values}
return dbc.FormGroup(
[
dbc.Label(label),
dcc.RangeSlider(
min=values[0],
max=values[-1],
step=1000,
value=[values[1], values[-2]],
marks=marks,
**kwargs,
),
]
)
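# Illustrative usage sketch (not part of the original module): OptionMenu
# renders radio buttons for short option lists and a select box otherwise,
# while CustomRangeSlider marks its endpoints in thousands. The labels, ids
# and values below are hypothetical.
def _example_form_controls():
    return [
        OptionMenu(values=["standard", "express"], label="Shipping", id="shipping"),
        CustomRangeSlider(
            values=[0, 10000, 50000, 100000], label="Budget", id="budget"
        ),
    ]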
def get_unique(connection, db, table, col):
query = f"""
SELECT DISTINCT {col}
FROM {db}.dbo.{table};
"""
return [x[0] for x in connection.execute(query).fetchall()]
def get_range(connection, db, table, col):
query = f"""
SELECT MIN({col}), MAX({col})
FROM {db}.dbo.{table};
"""
return connection.execute(query).fetchall()[0]
def get_column_strings(df):
    # Given a dataframe (e.g. loaded from the actual csv file), build SQL column
    # definitions from its column names and dtypes.
types = (
df.dtypes.copy()
.replace("float64", "FLOAT")
.replace("int64", "INT")
.replace("object", "VARCHAR(100) COLLATE Latin1_General_BIN2")
)
ls = [f"{ix.lower()} {t}" for ix, t in zip(types.index, types.values)]
return ",\n".join(ls)
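# Illustrative usage sketch (not part of the original module): turn a small
# hypothetical dataframe into the column clause of a CREATE TABLE statement.
def _example_create_table_sql():
    import pandas as pd  # local import; the rest of this module does not need pandas

    df = pd.DataFrame({"Name": ["a"], "Amount": [1.5], "Count": [3]})
    return f"CREATE TABLE dbo.example_table (\n{get_column_strings(df)}\n);"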
```
#### File: apps/dash-video-detection/app.py
```python
import time
from base64 import b64encode
from pprint import pprint
import cv2
import dash
import dash_player
import dash_table
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import pandas as pd
from PIL import ImageColor, Image
import plotly.express as px
import tensorflow as tf
import tensorflow_hub as hub
def Header(name, app):
title = html.H2(name, style={"margin-top": 5})
logo = html.Img(
src=app.get_asset_url("dash-logo.png"), style={"float": "right", "height": 50}
)
link = html.A(logo, href="https://plotly.com/dash/")
return dbc.Row([dbc.Col(title, md=8), dbc.Col(link, md=4)])
def add_editable_box(
fig, x0, y0, x1, y1, name=None, color=None, opacity=1, group=None, text=None
):
fig.add_shape(
editable=True,
x0=x0,
y0=y0,
x1=x1,
y1=y1,
line_color=color,
opacity=opacity,
line_width=3,
name=name,
)
# Load colors and detector
colors = list(ImageColor.colormap.values())
module_handle = "https://tfhub.dev/google/openimages_v4/ssd/mobilenet_v2/1"
detector = hub.load(module_handle).signatures["default"]
# Start app
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.UNITED])
server = app.server
controls = [
dbc.Select(
id="scene",
options=[{"label": f"Scene #{i}", "value": i} for i in range(1, 4)],
value=1,
),
dbc.Button(
"Detect Frame", id="detect-frame", n_clicks=0, color="primary", block=True
),
html.A(
dbc.Button("Download", n_clicks=0, color="info", outline=True, block=True),
download="annotations.csv",
id="download",
),
]
video = dbc.Card(
[
dbc.CardBody(
dash_player.DashPlayer(
id="video", width="100%", height="auto", controls=True
)
)
]
)
graph_detection = dbc.Card(
[
dbc.CardBody(
dcc.Graph(
id="graph-detection",
config={"modeBarButtonsToAdd": ["drawrect"]},
style={"height": "calc(50vh - 100px)"},
)
)
]
)
record_table = dbc.Card(
dash_table.DataTable(
id="record-table",
editable=True,
columns=[
{"name": i, "id": i}
for i in [
"scene",
"time",
"order",
"object",
"xmin",
"xmax",
"ymin",
"ymax",
]
],
data=[],
page_size=10,
),
body=True,
)
app.layout = dbc.Container(
[
Header("Dash AV Video Detection", app),
html.Hr(),
dbc.Row(
[
dbc.Col(
[
video,
html.Br(),
dbc.Card(dbc.Row([dbc.Col(c) for c in controls]), body=True),
],
md=7,
),
dbc.Col([graph_detection, html.Br(), record_table], md=5),
]
),
dcc.Store(id="store-figure"),
# dcc.Location(id='url'),
],
fluid=True,
)
@app.callback(Output("video", "url"), [Input("scene", "value")])
def update_scene(i):
return app.get_asset_url(f"scene_{i}.mov")
@app.callback(Output("download", "href"), [Input("record-table", "data")])
def update_download_href(data):
df = pd.DataFrame.from_records(data)
df_b64 = b64encode(df.to_csv(index=False).encode())
return "data:text/csv;base64," + df_b64.decode()
@app.callback(
Output("record-table", "data"),
[Input("graph-detection", "relayoutData")],
[
State("graph-detection", "figure"),
State("record-table", "data"),
State("video", "currentTime"),
State("scene", "value"),
],
)
def update_table(relayout_data, figure, table_data, curr_time, scene):
if relayout_data is None or figure is None:
return dash.no_update
keys = list(relayout_data.keys())
shapes = figure["layout"]["shapes"]
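    # For editable shapes, Plotly's relayoutData arrives in one of two forms:
    # drawing a new rectangle sends the full list under a "shapes" key, while
    # dragging or resizing an existing one sends keys like "shapes[2].x0".
    # The branches below distinguish those two cases.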
if len(keys) == 0:
return dash.no_update
elif "shapes" in keys:
shapes = relayout_data["shapes"]
i = len(shapes) - 1
elif "shapes[" in keys[0]:
i = int(keys[0].replace("shapes[", "").split("].")[0])
else:
return dash.no_update
if i >= len(shapes):
return dash.no_update
filtered_table_data = [
row
for row in table_data
if not (
row["order"] == i
and row["time"] == round(curr_time, 6)
and row["scene"] == scene
)
]
new_shape = shapes[i]
new = {
"time": round(curr_time, 6),
"scene": scene,
"object": new_shape.get("name", "New"),
"order": i,
"xmin": round(new_shape["x0"], 1),
"xmax": round(new_shape["x1"], 1),
"ymin": round(new_shape["y0"], 1),
"ymax": round(new_shape["y1"], 1),
}
filtered_table_data.append(new)
return filtered_table_data
@app.callback(
Output("graph-detection", "figure"),
[Input("store-figure", "data"), Input("graph-detection", "relayoutData")],
)
def store_to_graph(data, relayout_data):
ctx = dash.callback_context
if not ctx.triggered:
return dash.no_update
prop_id = ctx.triggered[0]["prop_id"]
if prop_id == "store-figure.data":
return data
if "shapes" in relayout_data:
data["layout"]["shapes"] = relayout_data.get("shapes")
return data
else:
return dash.no_update
@app.callback(
Output("store-figure", "data"),
[Input("detect-frame", "n_clicks")],
[State("scene", "value"), State("video", "currentTime")],
)
def show_time(n_clicks, scene, ms):
if ms is None or scene is None:
return dash.no_update
t0 = time.time()
cap = cv2.VideoCapture(f"./data/scene-{scene}.mov")
cap.read()
cap.set(cv2.CAP_PROP_POS_MSEC, 1000 * ms)
ret, frame = cap.read()
img = cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
resized = cv2.resize(img, (512, 512))
image_tensor = tf.image.convert_image_dtype(resized, tf.float32)[tf.newaxis, ...]
result = detector(image_tensor)
boxes = result["detection_boxes"].numpy()
scores = result["detection_scores"].numpy()
labels = result["detection_class_entities"].numpy()
class_ids = result["detection_class_labels"].numpy()
# Start build figure
im = Image.fromarray(img)
fig = px.imshow(im, binary_format="jpg")
fig.update_xaxes(visible=False)
fig.update_yaxes(visible=False)
fig.update_layout(
showlegend=False, margin=dict(l=0, r=0, t=0, b=0), uirevision=n_clicks
)
for i in range(min(10, boxes.shape[0])):
class_id = scores[i].argmax()
label = labels[i].decode("ascii")
confidence = scores[i].max()
# ymin, xmin, ymax, xmax
y0, x0, y1, x1 = boxes[i]
x0 *= im.size[0]
x1 *= im.size[0]
y0 *= im.size[1]
y1 *= im.size[1]
color = colors[class_ids[i] % len(colors)]
text = f"{label}: {int(confidence*100)}%"
if confidence > 0.1:
add_editable_box(
fig, x0, y0, x1, y1, group=label, name=label, color=color, text=text
)
print(f"Detected in {time.time() - t0:.2f}s.")
return fig
if __name__ == "__main__":
app.run_server(debug=True)
```
#### File: apps/dash-video-detection/generate_video.py
```python
import cv2
import plotly.express as px
import os
from tqdm import tqdm
import tensorflow_hub as hub
import tensorflow as tf
from PIL import Image
# For drawing onto the image.
import numpy as np
from PIL import Image
from PIL import ImageColor
from PIL import ImageDraw
from PIL import ImageFont
from PIL import ImageOps
def draw_bounding_box_on_image(
image, ymin, xmin, ymax, xmax, color, font, thickness=4, display_str_list=()
):
"""Adds a bounding box to an image."""
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
(left, right, top, bottom) = (
xmin * im_width,
xmax * im_width,
ymin * im_height,
ymax * im_height,
)
draw.line(
[(left, top), (left, bottom), (right, bottom), (right, top), (left, top)],
width=thickness,
fill=color,
)
# If the total height of the display strings added to the top of the bounding
# box exceeds the top of the image, stack the strings below the bounding box
# instead of above.
display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]
# Each display_str has a top and bottom margin of 0.05x.
total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)
if top > total_display_str_height:
text_bottom = top
else:
text_bottom = top + total_display_str_height
# Reverse list and print from bottom to top.
for display_str in display_str_list[::-1]:
text_width, text_height = font.getsize(display_str)
margin = np.ceil(0.05 * text_height)
draw.rectangle(
[
(left, text_bottom - text_height - 2 * margin),
(left + text_width, text_bottom),
],
fill=color,
)
draw.text(
(left + margin, text_bottom - text_height - margin),
display_str,
fill="black",
font=font,
)
text_bottom -= text_height - 2 * margin
def draw_boxes(
image, boxes, class_ids, class_names, scores, font, max_boxes=10, min_score=0.1
):
"""Overlay labeled boxes on an image with formatted scores and label names."""
colors = list(ImageColor.colormap.values())
for i in range(min(boxes.shape[0], max_boxes)):
if scores[i] >= min_score:
ymin, xmin, ymax, xmax = tuple(boxes[i])
display_str = "{}: {}%".format(
class_names[i].decode("ascii"), int(100 * scores[i])
)
color = colors[class_ids[i] % len(colors)]
image_pil = Image.fromarray(np.uint8(image)).convert("RGB")
draw_bounding_box_on_image(
image_pil,
ymin,
xmin,
ymax,
xmax,
color,
font,
display_str_list=[display_str],
)
np.copyto(image, np.array(image_pil))
return np.array(image_pil)
def fast_draw_boxes(
image, boxes, class_ids, class_names, scores, font, max_boxes=10, min_score=0.1
):
"""Overlay labeled boxes on an image with formatted scores and label names."""
colors = list(ImageColor.colormap.values())
image_pil = Image.fromarray(np.uint8(image)).convert("RGB")
for i in range(min(boxes.shape[0], max_boxes)):
if scores[i] >= min_score:
ymin, xmin, ymax, xmax = tuple(boxes[i])
display_str = "{}: {}%".format(
class_names[i].decode("ascii"), int(100 * scores[i])
)
color = colors[class_ids[i] % len(colors)]
draw_bounding_box_on_image(
image_pil,
ymin,
xmin,
ymax,
xmax,
color,
font,
display_str_list=[display_str],
)
# np.copyto(image, np.array(image_pil))
return np.array(image_pil)
module_handle = "https://tfhub.dev/google/openimages_v4/ssd/mobilenet_v2/1"
detector = hub.load(module_handle).signatures["default"]
codec = "XVID"
try:
font = ImageFont.truetype(
"/usr/share/fonts/truetype/liberation/LiberationSansNarrow-Regular.ttf", 25
)
except IOError:
print("Font not found, using default font.")
font = ImageFont.load_default()
i = 2
# Define the codec and create VideoWriter object
VIDEO_PATH = f"./data/scene-{i}.mov"
VIDEO_OUT = f"./data/processed/scene_{i}.mov"
frames = []
cap = cv2.VideoCapture(VIDEO_PATH)
ret = True
while ret:
ret, frame = cap.read()
if ret:
frame = cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frames.append(frame)
processed_frames = []
sample_rate = 3
for i, img in enumerate(tqdm(frames)):
if i % sample_rate == 0:
resized = cv2.resize(img, (512, 512))
image_tensor = tf.image.convert_image_dtype(resized, tf.float32)[
tf.newaxis, ...
]
result = detector(image_tensor)
image_with_boxes = fast_draw_boxes(
img.copy(),
result["detection_boxes"].numpy(),
result["detection_class_labels"].numpy(),
result["detection_class_entities"].numpy(),
result["detection_scores"].numpy(),
font=font,
)
processed_frames.append(image_with_boxes)
fourcc = cv2.VideoWriter_fourcc(*codec)
out = cv2.VideoWriter(VIDEO_OUT, fourcc, 30, (1280, 720))
for frame in tqdm(processed_frames):
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
out.write(frame)
out.release()
```
|
{
"source": "jeroenvermeulen/netbox-dns",
"score": 2
}
|
#### File: netbox-dns/netbox_dns/filters.py
```python
import django_filters
from django.db.models import Q
from extras.filters import TagFilter
from netbox.filtersets import PrimaryModelFilterSet
from .models import NameServer, Record, Zone
class ZoneFilter(PrimaryModelFilterSet):
"""Filter capabilities for Zone instances."""
q = django_filters.CharFilter(
method="search",
label="Search",
)
name = django_filters.CharFilter(
lookup_expr="icontains",
)
status = django_filters.ChoiceFilter(
choices=Zone.CHOICES,
)
tag = TagFilter()
class Meta:
model = Zone
fields = ("name", "status", "nameservers", "tag")
def search(self, queryset, name, value):
"""Perform the filtered search."""
if not value.strip():
return queryset
qs_filter = Q(name__icontains=value) | Q(status__icontains=value)
return queryset.filter(qs_filter)
class NameServerFilter(PrimaryModelFilterSet):
"""Filter capabilities for NameServer instances."""
name = django_filters.CharFilter(
lookup_expr="icontains",
)
tag = TagFilter()
class Meta:
model = NameServer
fields = ("name", "tag")
class RecordFilter(PrimaryModelFilterSet):
"""Filter capabilities for Record instances."""
q = django_filters.CharFilter(
method="search",
label="Search",
)
type = django_filters.MultipleChoiceFilter(
choices=Record.CHOICES,
null_value=None,
)
name = django_filters.CharFilter(
lookup_expr="icontains",
)
value = django_filters.CharFilter(
lookup_expr="icontains",
)
zone_id = django_filters.ModelMultipleChoiceFilter(
queryset=Zone.objects.all(),
label="Parent Zone ID",
)
zone = django_filters.ModelMultipleChoiceFilter(
field_name="zone__name",
to_field_name="name",
queryset=Zone.objects.all(),
label="Parent Zone",
)
tag = TagFilter()
managed = django_filters.BooleanFilter()
class Meta:
model = Record
fields = ("type", "name", "value", "tag", "managed")
def search(self, queryset, name, value):
"""Perform the filtered search."""
if not value.strip():
return queryset
qs_filter = (
Q(name__icontains=value)
| Q(value__icontains=value)
| Q(zone__name__icontains=value)
)
return queryset.filter(qs_filter)
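# Example usage (sketch, assuming a populated database): the filter sets can be
# instantiated directly with a query dict; 'q' feeds the search() methods above.
# records = RecordFilter({"q": "www", "type": ["A"]}, queryset=Record.objects.all()).qs
# zones = ZoneFilter({"status": "active"}, queryset=Zone.objects.all()).qs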
```
#### File: netbox-dns/netbox_dns/forms.py
```python
from django import forms
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import (
MinValueValidator,
MaxValueValidator,
validate_ipv6_address,
validate_ipv4_address,
)
from django.forms import (
CharField,
IntegerField,
BooleanField,
NullBooleanField,
)
from django.urls import reverse_lazy
from extras.forms import AddRemoveTagsForm
from extras.models.tags import Tag
from utilities.forms import (
CSVModelForm,
BootstrapMixin,
BulkEditNullBooleanSelect,
DynamicModelMultipleChoiceField,
TagFilterField,
StaticSelect,
CSVChoiceField,
CSVModelChoiceField,
DynamicModelChoiceField,
APISelect,
StaticSelectMultiple,
add_blank_choice,
)
from .fields import CustomDynamicModelMultipleChoiceField
from .models import NameServer, Record, Zone
class BulkEditForm(forms.Form):
"""Base form for editing multiple objects in bulk."""
def __init__(self, model, *args, **kwargs):
super().__init__(*args, **kwargs)
self.model = model
self.nullable_fields = []
if hasattr(self.Meta, "nullable_fields"):
self.nullable_fields = self.Meta.nullable_fields
class ZoneForm(BootstrapMixin, forms.ModelForm):
"""Form for creating a new Zone object."""
def __init__(self, *args, **kwargs):
"""Override the __init__ method in order to provide the initial value for the default fields"""
super().__init__(*args, **kwargs)
defaults = settings.PLUGINS_CONFIG.get("netbox_dns")
def _initialize(initial, setting):
if initial.get(setting, None) in (None, ""):
initial[setting] = defaults.get(f"zone_{setting}", None)
for setting in (
"default_ttl",
"soa_ttl",
"soa_rname",
"soa_serial_auto",
"soa_refresh",
"soa_retry",
"soa_expire",
"soa_minimum",
):
_initialize(self.initial, setting)
if self.initial.get("soa_ttl", None) is None:
self.initial["soa_ttl"] = self.initial.get("default_ttl", None)
if self.initial.get("soa_serial_auto"):
self.initial["soa_serial"] = None
if self.initial.get("soa_mname", None) in (None, ""):
default_soa_mname = defaults.get("zone_soa_mname", None)
if default_soa_mname is not None:
try:
self.initial["soa_mname"] = NameServer.objects.get(
name=default_soa_mname
)
except NameServer.DoesNotExist:
pass
if not self.initial.get("nameservers", []):
default_nameservers = defaults.get("zone_nameservers", [])
if default_nameservers:
self.initial["nameservers"] = NameServer.objects.filter(
name__in=default_nameservers
)
def clean_default_ttl(self):
return (
self.cleaned_data["default_ttl"]
if self.cleaned_data["default_ttl"]
else self.initial["default_ttl"]
)
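    # The zone_* defaults read above come from the plugin settings, e.g. (sketch):
    # PLUGINS_CONFIG = {"netbox_dns": {"zone_default_ttl": 86400,
    #                                  "zone_soa_rname": "hostmaster.example.com"}}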
nameservers = CustomDynamicModelMultipleChoiceField(
queryset=NameServer.objects.all(),
required=False,
)
tags = DynamicModelMultipleChoiceField(
queryset=Tag.objects.all(),
required=False,
)
default_ttl = IntegerField(
required=False,
label="Default TTL",
help_text="Default TTL for new records in this zone",
validators=[MinValueValidator(1)],
)
soa_ttl = IntegerField(
required=True,
label="SOA TTL",
help_text="TTL for the SOA record of the zone",
validators=[MinValueValidator(1)],
)
soa_rname = CharField(
required=True,
label="SOA Responsible",
help_text="Mailbox of the zone's administrator",
)
soa_serial_auto = BooleanField(
required=False,
label="Generate SOA Serial",
help_text="Automatically generate the SOA Serial",
)
soa_serial = IntegerField(
required=False,
label="SOA Serial",
help_text="Serial number of the current zone data version",
validators=[MinValueValidator(1)],
)
soa_refresh = IntegerField(
required=True,
label="SOA Refresh",
help_text="Refresh interval for secondary name servers",
validators=[MinValueValidator(1)],
)
soa_retry = IntegerField(
required=True,
label="SOA Retry",
help_text="Retry interval for secondary name servers",
validators=[MinValueValidator(1)],
)
soa_expire = IntegerField(
required=True,
label="SOA Expire",
help_text="Expire time after which the zone is considered unavailable",
validators=[MinValueValidator(1)],
)
soa_minimum = IntegerField(
required=True,
label="SOA Minimum TTL",
help_text="Minimum TTL for negative results, e.g. NXRRSET",
validators=[MinValueValidator(1)],
)
class Meta:
model = Zone
fields = (
"name",
"status",
"nameservers",
"default_ttl",
"tags",
"soa_ttl",
"soa_mname",
"soa_rname",
"soa_serial_auto",
"soa_serial",
"soa_refresh",
"soa_retry",
"soa_expire",
"soa_minimum",
)
widgets = {
"status": StaticSelect(),
"soa_mname": StaticSelect(),
}
help_texts = {
"soa_mname": "Primary name server for the zone",
}
class ZoneFilterForm(BootstrapMixin, forms.Form):
"""Form for filtering Zone instances."""
model = Zone
q = CharField(
required=False,
widget=forms.TextInput(attrs={"placeholder": "Name or Status"}),
label="Search",
)
status = forms.ChoiceField(
choices=add_blank_choice(Zone.CHOICES),
required=False,
widget=StaticSelect(),
)
name = CharField(
required=False,
label="Name",
)
nameservers = CustomDynamicModelMultipleChoiceField(
queryset=NameServer.objects.all(),
required=False,
)
tag = TagFilterField(Zone)
class ZoneCSVForm(CSVModelForm, BootstrapMixin, forms.ModelForm):
status = CSVChoiceField(
choices=Zone.CHOICES,
help_text="Zone status",
)
default_ttl = IntegerField(
required=False,
help_text="Default TTL",
)
soa_ttl = IntegerField(
required=False,
help_text="TTL for the SOA record of the zone",
)
soa_mname = CSVModelChoiceField(
queryset=NameServer.objects.all(),
required=False,
to_field_name="name",
help_text="Primary name server for the zone",
error_messages={
"invalid_choice": "Nameserver not found.",
},
)
soa_rname = CharField(
required=False,
help_text="Mailbox of the zone's administrator",
)
soa_serial_auto = BooleanField(
required=False,
help_text="Generate the SOA serial",
)
soa_serial = IntegerField(
required=False,
help_text="Serial number of the current zone data version",
)
soa_refresh = IntegerField(
required=False,
help_text="Refresh interval for secondary name servers",
)
soa_retry = IntegerField(
required=False,
help_text="Retry interval for secondary name servers",
)
soa_expire = IntegerField(
required=False,
help_text="Expire time after which the zone is considered unavailable",
)
soa_minimum = IntegerField(
required=False,
help_text="Minimum TTL for negative results, e.g. NXRRSET",
)
def _get_default_value(self, field):
_default_values = settings.PLUGINS_CONFIG.get("netbox_dns", dict())
if _default_values.get("zone_soa_ttl", None) is None:
_default_values["zone_soa_ttl"] = _default_values.get(
"zone_default_ttl", None
)
return _default_values.get(f"zone_{field}", None)
def _clean_field_with_defaults(self, field):
if self.cleaned_data[field]:
value = self.cleaned_data[field]
else:
value = self._get_default_value(field)
if value is None:
raise ValidationError(f"{field} not set and no default value available")
return value
def clean_default_ttl(self):
return self._clean_field_with_defaults("default_ttl")
def clean_soa_ttl(self):
return self._clean_field_with_defaults("soa_ttl")
def clean_soa_mname(self):
return self._clean_field_with_defaults("soa_mname")
def clean_soa_rname(self):
return self._clean_field_with_defaults("soa_rname")
def clean_soa_serial_auto(self):
try:
return self._clean_field_with_defaults("soa_serial_auto")
except ValidationError:
if self.cleaned_data["soa_serial"] or self._get_default_value("soa_serial"):
return None
else:
raise
def clean_soa_serial(self):
try:
return self._clean_field_with_defaults("soa_serial")
except ValidationError:
if self.cleaned_data["soa_serial_auto"] or self._get_default_value(
"soa_serial_auto"
):
return None
else:
raise
def clean_soa_refresh(self):
return self._clean_field_with_defaults("soa_refresh")
def clean_soa_retry(self):
return self._clean_field_with_defaults("soa_retry")
def clean_soa_expire(self):
return self._clean_field_with_defaults("soa_expire")
def clean_soa_minimum(self):
return self._clean_field_with_defaults("soa_minimum")
class Meta:
model = Zone
fields = (
"name",
"status",
"default_ttl",
"soa_ttl",
"soa_mname",
"soa_rname",
"soa_serial_auto",
"soa_serial",
"soa_refresh",
"soa_retry",
"soa_expire",
"soa_minimum",
)
class ZoneBulkEditForm(BootstrapMixin, AddRemoveTagsForm, BulkEditForm):
pk = forms.ModelMultipleChoiceField(
queryset=Zone.objects.all(),
widget=forms.MultipleHiddenInput(),
)
status = forms.ChoiceField(
choices=add_blank_choice(Zone.CHOICES),
required=False,
widget=StaticSelect(),
)
nameservers = CustomDynamicModelMultipleChoiceField(
queryset=NameServer.objects.all(),
required=False,
)
default_ttl = IntegerField(
required=False,
label="Default TTL",
validators=[MinValueValidator(1)],
)
soa_ttl = IntegerField(
required=False,
label="SOA TTL",
validators=[MinValueValidator(1)],
)
soa_mname = DynamicModelChoiceField(
queryset=NameServer.objects.all(),
required=False,
label="SOA Primary Nameserver",
widget=APISelect(
attrs={
"data-url": reverse_lazy("plugins-api:netbox_dns-api:nameserver-list")
}
),
)
soa_rname = CharField(
required=False,
label="SOA Responsible",
)
soa_serial_auto = NullBooleanField(
required=False,
widget=BulkEditNullBooleanSelect(),
label="Generate SOA Serial",
)
soa_serial = IntegerField(
required=False,
label="SOA Serial",
validators=[MinValueValidator(1), MaxValueValidator(4294967295)],
)
soa_refresh = IntegerField(
required=False,
label="SOA Refresh",
validators=[MinValueValidator(1)],
)
soa_retry = IntegerField(
required=False,
label="SOA Retry",
validators=[MinValueValidator(1)],
)
soa_expire = IntegerField(
required=False,
label="SOA Expire",
validators=[MinValueValidator(1)],
)
soa_minimum = IntegerField(
required=False,
label="SOA Minimum TTL",
validators=[MinValueValidator(1)],
)
def clean(self):
"""
If soa_serial_auto is True, set soa_serial to None.
"""
cleaned_data = super().clean()
if cleaned_data.get("soa_serial_auto"):
cleaned_data["soa_serial"] = None
class Meta:
nullable_fields = []
model = Zone
fields = (
"name",
"status",
"nameservers",
"default_ttl",
"tags",
"soa_ttl",
"soa_rname",
"soa_serial_auto",
"soa_serial",
"soa_refresh",
"soa_retry",
"soa_expire",
"soa_minimum",
)
widgets = {
"status": StaticSelect(),
}
class NameServerForm(BootstrapMixin, forms.ModelForm):
"""Form for creating a new NameServer object."""
tags = DynamicModelMultipleChoiceField(
queryset=Tag.objects.all(),
required=False,
)
class Meta:
model = NameServer
fields = ("name", "tags")
class NameServerFilterForm(BootstrapMixin, forms.Form):
"""Form for filtering NameServer instances."""
model = NameServer
name = CharField(
required=False,
label="Name",
)
tag = TagFilterField(NameServer)
class NameServerCSVForm(CSVModelForm, BootstrapMixin, forms.ModelForm):
class Meta:
model = NameServer
fields = ("name",)
class NameServerBulkEditForm(BootstrapMixin, AddRemoveTagsForm, BulkEditForm):
pk = forms.ModelMultipleChoiceField(
queryset=NameServer.objects.all(),
widget=forms.MultipleHiddenInput(),
)
class Meta:
nullable_fields = []
model = NameServer
fields = ("name", "tags")
class RecordForm(BootstrapMixin, forms.ModelForm):
"""Form for creating a new Record object."""
def clean(self):
"""
        For A and AAAA records, verify that a valid IPv4 or IPv6 address was
        passed as the value and raise a ValidationError otherwise. Also ensure
        that no other record with the same value already has PTR generation enabled.
"""
cleaned_data = super().clean()
type = cleaned_data.get("type")
if type not in (Record.A, Record.AAAA):
return
value = cleaned_data.get("value")
try:
ip_version = "4" if type == Record.A else "6"
if type == Record.A:
validate_ipv4_address(value)
else:
validate_ipv6_address(value)
except ValidationError:
raise forms.ValidationError(
{
"value": f"A valid IPv{ip_version} address is required for record type {type}."
}
)
if cleaned_data.get("disable_ptr"):
return
pk = cleaned_data.get("pk")
conflicts = Record.objects.filter(value=value, type=type, disable_ptr=False)
if self.instance.pk:
conflicts = conflicts.exclude(pk=self.instance.pk)
if len(conflicts):
raise forms.ValidationError(
{
"value": f"There is already an {type} record with value {value} and PTR enabled."
}
)
def clean_ttl(self):
ttl = self.cleaned_data["ttl"]
if ttl is not None:
if ttl <= 0:
raise ValidationError("TTL must be greater than zero")
return ttl
        elif "zone" in self.cleaned_data:
            return self.cleaned_data["zone"].default_ttl
disable_ptr = BooleanField(
label="Disable PTR",
required=False,
)
tags = DynamicModelMultipleChoiceField(
queryset=Tag.objects.all(),
required=False,
)
ttl = IntegerField(
required=False,
label="TTL",
)
class Meta:
model = Record
fields = ("zone", "type", "disable_ptr", "name", "value", "ttl", "tags")
widgets = {
"zone": StaticSelect(),
"type": StaticSelect(),
}
class RecordFilterForm(BootstrapMixin, forms.Form):
"""Form for filtering Record instances."""
model = Record
q = CharField(
required=False,
widget=forms.TextInput(attrs={"placeholder": "Name, Zone or Value"}),
label="Search",
)
type = forms.MultipleChoiceField(
choices=add_blank_choice(Record.CHOICES),
required=False,
widget=StaticSelectMultiple(),
)
name = CharField(
required=False,
label="Name",
)
value = CharField(
required=False,
label="Value",
)
zone_id = CustomDynamicModelMultipleChoiceField(
queryset=Zone.objects.all(),
required=False,
label="Zone",
)
tag = TagFilterField(Record)
class RecordCSVForm(CSVModelForm, BootstrapMixin, forms.ModelForm):
zone = CSVModelChoiceField(
queryset=Zone.objects.all(),
to_field_name="name",
required=True,
help_text="Assigned zone",
)
type = CSVChoiceField(
choices=Record.CHOICES,
required=True,
help_text="Record Type",
)
ttl = IntegerField(
required=False,
help_text="TTL",
)
disable_ptr = forms.BooleanField(
required=False,
label="Disable PTR",
help_text="Disable generation of a PTR record",
)
def clean(self):
"""
        For A and AAAA records, verify that a valid IPv4 or IPv6 address was
        passed as the value and raise a ValidationError otherwise.
"""
cleaned_data = super().clean()
type = cleaned_data.get("type")
if type not in (Record.A, Record.AAAA):
return
value = cleaned_data.get("value")
try:
ip_version = "4" if type == Record.A else "6"
if type == Record.A:
validate_ipv4_address(value)
else:
validate_ipv6_address(value)
except ValidationError:
raise forms.ValidationError(
{
"value": f"A valid IPv{ip_version} address is required for record type {type}."
}
)
if cleaned_data.get("disable_ptr"):
return
conflicts = Record.objects.filter(value=value, type=type, disable_ptr=False)
if len(conflicts):
raise forms.ValidationError(
{
"value": f"There is already an {type} record with value {value} and PTR enabled."
}
)
def clean_ttl(self):
ttl = self.cleaned_data["ttl"]
if ttl is not None:
if ttl <= 0:
raise ValidationError("TTL must be greater than zero")
return ttl
elif "zone" in self.cleaned_data:
return self.cleaned_data["zone"].default_ttl
class Meta:
model = Record
fields = ("zone", "type", "name", "value", "ttl", "disable_ptr")
class RecordBulkEditForm(BootstrapMixin, AddRemoveTagsForm, BulkEditForm):
pk = forms.ModelMultipleChoiceField(
queryset=Record.objects.all(), widget=forms.MultipleHiddenInput()
)
zone = DynamicModelChoiceField(
queryset=Zone.objects.all(),
required=False,
widget=APISelect(
attrs={"data-url": reverse_lazy("plugins-api:netbox_dns-api:zone-list")}
),
)
disable_ptr = NullBooleanField(
required=False, widget=BulkEditNullBooleanSelect(), label="Disable PTR"
)
ttl = IntegerField(
required=False,
label="TTL",
)
def clean(self):
"""
        When PTR generation is re-enabled in bulk, verify that none of the selected
        records would clash with an existing A/AAAA record that has the same value
        and PTR enabled, and raise a ValidationError otherwise.
"""
cleaned_data = super().clean()
disable_ptr = cleaned_data.get("disable_ptr")
if disable_ptr is None or disable_ptr:
return
for record in cleaned_data.get("pk"):
conflicts = (
Record.objects.filter(Record.unique_ptr_qs)
.filter(value=record.value)
.exclude(pk=record.pk)
)
if len(conflicts):
raise forms.ValidationError(
{
"disable_ptr": f"Multiple {record.type} records with value {record.value} and PTR enabled."
}
)
def clean_ttl(self):
ttl = self.cleaned_data["ttl"]
if ttl is not None:
if ttl <= 0:
raise ValidationError("TTL must be greater than zero")
return ttl
class Meta:
model = Record
fields = ("zone", "ttl", "disable_ptr", "tags")
nullable_fields = []
widgets = {
"zone": StaticSelect(),
}
```
#### File: netbox_dns/migrations/0008_zone_status_names.py
```python
from django.db import migrations
def rename_passive_status_to_parked(apps, schema_editor):
    # Use the historical model from the app registry; it carries no custom class
    # attributes such as STATUS_PARKED, so the new status value is given literally.
    Zone = apps.get_model("netbox_dns", "Zone")
    Zone.objects.filter(status="passive").update(status="parked")
class Migration(migrations.Migration):
dependencies = [
("netbox_dns", "0005_update_ns_records"),
]
operations = [
migrations.RunPython(rename_passive_status_to_parked),
]
```
#### File: netbox-dns/netbox_dns/models.py
```python
import ipaddress
from math import ceil
from datetime import datetime
from django.core.validators import MinValueValidator, MaxValueValidator
from django.core.exceptions import ObjectDoesNotExist
from django.db import models, transaction
from django.db.models import Q, Max, ExpressionWrapper, BooleanField
from django.db.models.functions import Length
from django.urls import reverse
from django.db.models.signals import m2m_changed
from django.dispatch import receiver
from extras.utils import extras_features
from netbox.models import PrimaryModel, TaggableManager
from utilities.querysets import RestrictedQuerySet
@extras_features("custom_links", "export_templates", "webhooks")
class NameServer(PrimaryModel):
name = models.CharField(
unique=True,
max_length=255,
)
tags = TaggableManager(
through="extras.TaggedItem",
blank=True,
)
objects = RestrictedQuerySet.as_manager()
clone_fields = ["name"]
class Meta:
ordering = ("name",)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse("plugins:netbox_dns:nameserver", kwargs={"pk": self.pk})
class ZoneManager(models.Manager.from_queryset(RestrictedQuerySet)):
"""Special Manager for zones providing the activity status annotation"""
def get_queryset(self):
return (
super(ZoneManager, self)
.get_queryset()
.annotate(
active=ExpressionWrapper(
Q(status__in=Zone.ACTIVE_STATUS_LIST), output_field=BooleanField()
)
)
)
@extras_features("custom_links", "export_templates", "webhooks")
class Zone(PrimaryModel):
STATUS_ACTIVE = "active"
STATUS_RESERVED = "reserved"
STATUS_DEPRECATED = "deprecated"
STATUS_PARKED = "parked"
CHOICES = (
(STATUS_ACTIVE, "Active"),
(STATUS_RESERVED, "Reserved"),
(STATUS_DEPRECATED, "Deprecated"),
(STATUS_PARKED, "Parked"),
)
CSS_CLASSES = {
STATUS_ACTIVE: "primary",
STATUS_RESERVED: "info",
STATUS_DEPRECATED: "danger",
STATUS_PARKED: "warning",
}
ACTIVE_STATUS_LIST = (STATUS_ACTIVE,)
name = models.CharField(
unique=True,
max_length=255,
)
status = models.CharField(
max_length=50,
choices=CHOICES,
default=STATUS_ACTIVE,
blank=True,
)
nameservers = models.ManyToManyField(
NameServer,
related_name="zones",
blank=True,
)
tags = TaggableManager(
through="extras.TaggedItem",
blank=True,
)
default_ttl = models.PositiveIntegerField(
blank=True,
verbose_name="Default TTL",
validators=[MinValueValidator(1)],
)
soa_ttl = models.PositiveIntegerField(
blank=False,
null=False,
verbose_name="SOA TTL",
validators=[MinValueValidator(1)],
)
soa_mname = models.ForeignKey(
NameServer,
related_name="zones_soa",
verbose_name="SOA MName",
on_delete=models.PROTECT,
blank=False,
null=False,
)
soa_rname = models.CharField(
max_length=255,
blank=False,
null=False,
verbose_name="SOA RName",
)
soa_serial = models.BigIntegerField(
blank=True,
null=True,
verbose_name="SOA Serial",
validators=[MinValueValidator(1), MaxValueValidator(4294967295)],
)
soa_refresh = models.PositiveIntegerField(
blank=False,
null=False,
verbose_name="SOA Refresh",
validators=[MinValueValidator(1)],
)
soa_retry = models.PositiveIntegerField(
blank=False,
null=False,
verbose_name="SOA Retry",
validators=[MinValueValidator(1)],
)
soa_expire = models.PositiveIntegerField(
blank=False,
null=False,
verbose_name="SOA Expire",
validators=[MinValueValidator(1)],
)
soa_minimum = models.PositiveIntegerField(
blank=False,
null=False,
verbose_name="SOA Minimum TTL",
validators=[MinValueValidator(1)],
)
soa_serial_auto = models.BooleanField(
verbose_name="Generate SOA Serial",
help_text="Automatically generate the SOA Serial field",
default=True,
)
objects = ZoneManager()
clone_fields = [
"name",
"status",
"nameservers",
"default_ttl",
"soa_ttl",
"soa_mname",
"soa_rname",
"soa_refresh",
"soa_retry",
"soa_expire",
"soa_minimum",
]
class Meta:
ordering = ("name",)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse("plugins:netbox_dns:zone", kwargs={"pk": self.pk})
def get_status_class(self):
return self.CSS_CLASSES.get(self.status)
def update_soa_record(self):
soa_name = "@"
soa_ttl = self.soa_ttl
soa_value = (
f"({self.soa_mname} {self.soa_rname} {self.soa_serial}"
f" {self.soa_refresh} {self.soa_retry} {self.soa_expire}"
f" {self.soa_minimum})"
)
old_soa_records = self.record_set.filter(type=Record.SOA, name=soa_name)
if len(old_soa_records):
for index, record in enumerate(old_soa_records):
if index > 0:
record.delete()
continue
if record.ttl != soa_ttl or record.value != soa_value:
record.ttl = soa_ttl
record.value = soa_value
record.managed = True
record.save()
else:
Record.objects.create(
zone_id=self.pk,
type=Record.SOA,
name=soa_name,
ttl=soa_ttl,
value=soa_value,
managed=True,
)
def update_ns_records(self, nameservers):
ns_name = "@"
ns_ttl = self.default_ttl
delete_ns = self.record_set.filter(type=Record.NS, managed=True).exclude(
value__in=nameservers
)
for record in delete_ns:
record.delete()
for ns in nameservers:
Record.raw_objects.update_or_create(
zone_id=self.pk,
type=Record.NS,
name=ns_name,
ttl=ns_ttl,
value=ns,
managed=True,
)
def check_nameservers(self):
nameservers = self.nameservers.all()
ns_warnings = []
ns_errors = []
if not nameservers:
ns_errors.append(f"No nameservers are configured for zone {self.name}")
for nameserver in nameservers:
ns_domain = ".".join(nameserver.name.split(".")[1:])
if not ns_domain:
continue
try:
ns_zone = Zone.objects.get(name=ns_domain)
except ObjectDoesNotExist:
continue
ns_name = nameserver.name.split(".")[0]
address_records = Record.objects.filter(
Q(zone=ns_zone),
Q(Q(name=f"{nameserver.name}.") | Q(name=ns_name)),
Q(Q(type=Record.A) | Q(type=Record.AAAA)),
)
if not address_records:
ns_warnings.append(
f"Nameserver {nameserver.name} does not have an address record in zone {ns_zone.name}"
)
return ns_warnings, ns_errors
def get_auto_serial(self):
records = Record.objects.filter(zone=self).exclude(type=Record.SOA)
if records:
soa_serial = (
records.aggregate(Max("last_updated"))
.get("last_updated__max")
.timestamp()
)
else:
soa_serial = ceil(datetime.now().timestamp())
if self.last_updated:
soa_serial = ceil(max(soa_serial, self.last_updated.timestamp()))
return soa_serial
def update_serial(self):
self.last_updated = datetime.now()
self.save()
def parent_zones(self):
zone_fields = self.name.split(".")
return [
f'{".".join(zone_fields[length:])}' for length in range(1, len(zone_fields))
]
def save(self, *args, **kwargs):
new_zone = self.pk is None
if not new_zone:
renamed_zone = Zone.objects.get(pk=self.pk).name != self.name
else:
renamed_zone = False
if self.soa_serial_auto:
self.soa_serial = self.get_auto_serial()
super().save(*args, **kwargs)
if (new_zone or renamed_zone) and self.name.endswith(".arpa"):
address_records = Record.objects.filter(
Q(ptr_record__isnull=True)
| Q(ptr_record__zone__name__in=self.parent_zones()),
type__in=(Record.A, Record.AAAA),
disable_ptr=False,
)
for record in address_records:
record.update_ptr_record()
elif renamed_zone:
for record in self.record_set.filter(ptr_record__isnull=False):
record.update_ptr_record()
self.update_soa_record()
def delete(self, *args, **kwargs):
with transaction.atomic():
address_records = list(self.record_set.filter(ptr_record__isnull=False))
for record in address_records:
record.ptr_record.delete()
ptr_records = self.record_set.filter(address_record__isnull=False)
update_records = [
record.pk
for record in Record.objects.filter(ptr_record__in=ptr_records)
]
super().delete(*args, **kwargs)
for record in Record.objects.filter(pk__in=update_records):
record.update_ptr_record()
@receiver(m2m_changed, sender=Zone.nameservers.through)
def update_ns_records(**kwargs):
if kwargs.get("action") not in ["post_add", "post_remove"]:
return
zone = kwargs.get("instance")
nameservers = zone.nameservers.all()
new_nameservers = [f'{ns.name.rstrip(".")}.' for ns in nameservers]
zone.update_ns_records(new_nameservers)
class RecordManager(models.Manager.from_queryset(RestrictedQuerySet)):
"""Special Manager for records providing the activity status annotation"""
def get_queryset(self):
return (
super(RecordManager, self)
.get_queryset()
.annotate(
active=ExpressionWrapper(
Q(
Q(zone__status__in=Zone.ACTIVE_STATUS_LIST)
& Q(
Q(address_record__isnull=True)
| Q(
address_record__zone__status__in=Zone.ACTIVE_STATUS_LIST
)
)
),
output_field=BooleanField(),
)
)
)
@extras_features("custom_links", "export_templates", "webhooks")
class Record(PrimaryModel):
A = "A"
AAAA = "AAAA"
CNAME = "CNAME"
MX = "MX"
TXT = "TXT"
NS = "NS"
SOA = "SOA"
SRV = "SRV"
PTR = "PTR"
SPF = "SPF"
CAA = "CAA"
DS = "DS"
SSHFP = "SSHFP"
TLSA = "TLSA"
AFSDB = "AFSDB"
APL = "APL"
DNSKEY = "DNSKEY"
CDNSKEY = "CDNSKEY"
CERT = "CERT"
DCHID = "DCHID"
DNAME = "DNAME"
HIP = "HIP"
IPSECKEY = "IPSECKEY"
LOC = "LOC"
NAPTR = "NAPTR"
NSEC = "NSEC"
RRSIG = "RRSIG"
RP = "RP"
CHOICES = (
(A, A),
(AAAA, AAAA),
(CNAME, CNAME),
(MX, MX),
(TXT, TXT),
(SOA, SOA),
(NS, NS),
(SRV, SRV),
(PTR, PTR),
(SPF, SPF),
(CAA, CAA),
(DS, DS),
(SSHFP, SSHFP),
(TLSA, TLSA),
(AFSDB, AFSDB),
(APL, APL),
(DNSKEY, DNSKEY),
(CDNSKEY, CDNSKEY),
(CERT, CERT),
(DCHID, DCHID),
(DNAME, DNAME),
(HIP, HIP),
(IPSECKEY, IPSECKEY),
(LOC, LOC),
(NAPTR, NAPTR),
(NSEC, NSEC),
(RRSIG, RRSIG),
(RP, RP),
)
unique_ptr_qs = Q(Q(disable_ptr=False), Q(Q(type="A") | Q(type="AAAA")))
zone = models.ForeignKey(
Zone,
on_delete=models.CASCADE,
)
type = models.CharField(
choices=CHOICES,
max_length=10,
)
name = models.CharField(
max_length=255,
)
value = models.CharField(
max_length=1000,
)
ttl = models.PositiveIntegerField(
verbose_name="TTL",
)
tags = TaggableManager(
through="extras.TaggedItem",
blank=True,
)
managed = models.BooleanField(
null=False,
default=False,
)
ptr_record = models.OneToOneField(
"self",
on_delete=models.SET_NULL,
related_name="address_record",
verbose_name="PTR record",
null=True,
blank=True,
)
disable_ptr = models.BooleanField(
verbose_name="Disable PTR",
help_text="Disable PTR record creation",
default=False,
)
objects = RecordManager()
raw_objects = RestrictedQuerySet.as_manager()
clone_fields = ["zone", "type", "name", "value", "ttl", "disable_ptr"]
class Meta:
ordering = ("zone", "name", "type", "value")
constraints = (
models.UniqueConstraint(
name="unique_pointer_for_address",
fields=["type", "value"],
condition=(
models.Q(
models.Q(disable_ptr=False),
models.Q(type="A") | models.Q(type="AAAA"),
)
),
),
)
def __str__(self):
if self.name.endswith("."):
return f"{self.name} [{self.type}]"
else:
return f"{self.name}.{self.zone.name} [{self.type}]"
def get_absolute_url(self):
return reverse("plugins:netbox_dns:record", kwargs={"pk": self.id})
def fqdn(self):
return f"{self.name}.{self.zone.name}."
def ptr_zone(self):
address = ipaddress.ip_address(self.value)
if address.version == 4:
lengths = range(1, 4)
else:
lengths = range(16, 32)
zone_names = [
".".join(address.reverse_pointer.split(".")[length:]) for length in lengths
]
ptr_zones = Zone.objects.filter(Q(name__in=zone_names)).order_by(
Length("name").desc()
)
if len(ptr_zones):
return ptr_zones[0]
def update_ptr_record(self):
ptr_zone = self.ptr_zone()
if ptr_zone is None or self.disable_ptr:
if self.ptr_record is not None:
with transaction.atomic():
self.ptr_record.delete()
self.ptr_record = None
return
ptr_name = ipaddress.ip_address(self.value).reverse_pointer.replace(
f".{ptr_zone.name}", ""
)
ptr_value = self.fqdn()
ptr_record = self.ptr_record
with transaction.atomic():
if ptr_record is not None:
if ptr_record.zone.pk != ptr_zone.pk:
ptr_record.delete()
ptr_record = None
else:
if (
ptr_record.name != ptr_name
or ptr_record.value != ptr_value
or ptr_record.ttl != self.ttl
):
ptr_record.name = ptr_name
ptr_record.value = ptr_value
ptr_record.ttl = self.ttl
ptr_record.save()
if ptr_record is None:
ptr_record = Record.objects.create(
zone_id=ptr_zone.pk,
type=Record.PTR,
name=ptr_name,
ttl=self.ttl,
value=ptr_value,
managed=True,
)
self.ptr_record = ptr_record
super().save()
def save(self, *args, **kwargs):
if self.type in (self.A, self.AAAA):
self.update_ptr_record()
super().save(*args, **kwargs)
zone = self.zone
if self.type != self.SOA and zone.soa_serial_auto:
zone.update_serial()
def delete(self, *args, **kwargs):
if self.ptr_record:
self.ptr_record.delete()
super().delete(*args, **kwargs)
zone = self.zone
if zone.soa_serial_auto:
zone.update_serial()
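# Example (sketch, assumes a matching forward zone and a reverse .arpa zone exist):
# zone = Zone.objects.get(name="example.com")
# record = Record(zone=zone, type=Record.A, name="www", value="10.0.0.1", ttl=300)
# record.save()   # also creates/updates a managed PTR record in the best-matching reverse zone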
```
#### File: netbox_dns/tests/test_auto_soa.py
```python
import re
from django.test import TestCase
from netbox_dns.models import NameServer, Zone, Record
def parse_soa_value(soa):
soa_match = re.match(
r"^\((\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\)", soa
)
return {
"soa_mname": soa_match.group(1),
"soa_rname": soa_match.group(2),
"soa_serial": int(soa_match.group(3)),
"soa_refresh": int(soa_match.group(4)),
"soa_retry": int(soa_match.group(5)),
"soa_expire": int(soa_match.group(6)),
"soa_minimum": int(soa_match.group(7)),
}
class AutoSOATest(TestCase):
zone_data = {
"default_ttl": 86400,
"soa_rname": "hostmaster.example.com",
"soa_refresh": 172800,
"soa_retry": 7200,
"soa_expire": 2592000,
"soa_ttl": 86400,
"soa_minimum": 3600,
"soa_serial": 1,
"soa_serial_auto": False,
}
@classmethod
def setUpTestData(cls):
cls.nameservers = [
NameServer(name="ns1.example.com"),
NameServer(name="ns2.example.com"),
]
NameServer.objects.bulk_create(cls.nameservers)
cls.zone = Zone.objects.create(
name="zone1.example.com", **cls.zone_data, soa_mname=cls.nameservers[0]
)
def test_zone_soa(self):
zone = self.zone
nameserver = self.nameservers[0]
soa_records = Record.objects.filter(type=Record.SOA, zone=zone)
soa = parse_soa_value(soa_records[0].value)
self.assertTrue(
all(
(
zone.soa_mname.name == soa.get("soa_mname"),
zone.soa_rname == soa.get("soa_rname"),
zone.soa_serial == soa.get("soa_serial"),
zone.soa_refresh == soa.get("soa_refresh"),
zone.soa_retry == soa.get("soa_retry"),
zone.soa_expire == soa.get("soa_expire"),
zone.soa_minimum == soa.get("soa_minimum"),
zone.soa_ttl == soa_records[0].ttl,
len(soa_records) == 1,
)
)
)
def test_zone_soa_change_mname(self):
zone = self.zone
nameserver2 = self.nameservers[1]
zone.soa_mname = nameserver2
zone.save()
soa_record = Record.objects.get(type=Record.SOA, zone=zone)
soa = parse_soa_value(soa_record.value)
self.assertEqual(nameserver2.name, soa.get("soa_mname"))
def test_zone_soa_change_rname(self):
zone = self.zone
rname = "new-hostmaster.example.com"
zone.soa_rname = rname
zone.save()
soa_record = Record.objects.get(type=Record.SOA, zone=zone)
soa = parse_soa_value(soa_record.value)
self.assertEqual(rname, soa.get("soa_rname"))
def test_zone_soa_change_serial(self):
zone = self.zone
serial = 42
zone.soa_serial = serial
zone.save()
soa_record = Record.objects.get(type=Record.SOA, zone=zone)
soa = parse_soa_value(soa_record.value)
self.assertEqual(serial, soa.get("soa_serial"))
def test_zone_soa_change_refresh(self):
zone = self.zone
refresh = 23
zone.soa_refresh = refresh
zone.save()
soa_record = Record.objects.get(type=Record.SOA, zone=zone)
soa = parse_soa_value(soa_record.value)
self.assertEqual(refresh, soa.get("soa_refresh"))
def test_zone_soa_change_retry(self):
zone = self.zone
retry = 2342
zone.soa_retry = retry
zone.save()
soa_record = Record.objects.get(type=Record.SOA, zone=zone)
soa = parse_soa_value(soa_record.value)
self.assertEqual(retry, soa.get("soa_retry"))
def test_zone_soa_change_expire(self):
zone = self.zone
expire = 4223
zone.soa_expire = expire
zone.save()
soa_record = Record.objects.get(type=Record.SOA, zone=zone)
soa = parse_soa_value(soa_record.value)
self.assertEqual(expire, soa.get("soa_expire"))
def test_zone_soa_change_minimum(self):
zone = self.zone
minimum = 4223
zone.soa_minimum = minimum
zone.save()
soa_record = Record.objects.get(type=Record.SOA, zone=zone)
soa = parse_soa_value(soa_record.value)
self.assertEqual(minimum, soa.get("soa_minimum"))
def test_zone_soa_change_ttl(self):
zone = self.zone
ttl = 422342
zone.soa_ttl = ttl
zone.save()
soa_record = Record.objects.get(type=Record.SOA, zone=zone)
self.assertEqual(ttl, soa_record.ttl)
```
|
{
"source": "JeroenvO/CZ-Stats",
"score": 3
}
|
#### File: JeroenvO/CZ-Stats/__init__.py
```python
import gc
from time import sleep
from math import ceil
import buttons
import defines
import rgb
import system
import uinterface
import urequests
import wifi
# globals
stat = 0
old_stat = 0
l = None
# colors
colors = [
# cmy
[(0, 255, 255), (255, 0, 255), (255, 255, 0)],
[(255, 255, 0), (0, 255, 255), (255, 0, 255)],
[(255, 0, 255), (255, 255, 0), (0, 255, 255)],
# rgb
[(255, 0, 0), (0, 255, 0), (0, 0, 255)],
[(0, 0, 255), (255, 0, 0), (0, 255, 0)],
[(0, 255, 0), (0, 0, 255), (255, 0, 0)],
# white
[(255, 255, 255), (255, 255, 255), (255, 255, 255)],
]
color = 0
# buttons
UP, DOWN, LEFT, RIGHT = defines.BTN_UP, defines.BTN_DOWN, defines.BTN_LEFT, defines.BTN_RIGHT
A, B = defines.BTN_A, defines.BTN_B
def input_up(pressed):
if pressed:
global color
color = (color + 1) % (len(colors))
draw_text()
def input_down(pressed):
if pressed:
global color
color = (color - 1) % (len(colors))
draw_text()
def input_left(pressed):
if pressed:
global stat
stat = 1 - stat
def input_right(pressed):
if pressed:
global stat
stat = 1 - stat
def input_B(pressed):
if pressed:
rgb.clear()
rgb.text("Bye!")
sleep(0.5)
system.reboot()
def input_A(pressed):
if pressed:
rgb.background((255, 100, 100))
else:
rgb.background((0, 0, 0))
def draw_error(e):
rgb.clear()
rgb.pixel((255, 0, 0), (REFRESH_RATE, 7)) # red for error
rgb.text('E {}'.format(e))
def draw_text():
global l, color
rgb.clear()
if l:
rgb.pixel((0, 150, 0), (REFRESH_RATE, 7)) # green for new data
for i, d in enumerate(l):
rgb.text(d, colors[color][i], (ceil(31/len(l))*i, 0))
else:
rgb.text('E Data')
# init
buttons.register(UP, input_up)
buttons.register(DOWN, input_down)
buttons.register(LEFT, input_left)
buttons.register(RIGHT, input_right)
buttons.register(B, input_B)
buttons.register(A, input_A)
rgb.setfont(rgb.FONT_6x3)
rgb.framerate(10)  # display updates per second
REFRESH_RATE = 31  # main-loop iterations between data refreshes (also the x-position of the status pixel)
# wifi connect
if not wifi.status():
if not uinterface.connect_wifi():
system.reboot()
rgb.text('Hi!')
# main loop
count = REFRESH_RATE - 1 # start fast
while True:
if not wifi.status():
if not uinterface.connect_wifi():
system.reboot()
if count < REFRESH_RATE and stat == old_stat:
gc.collect()
sleep(0.1)
rgb.pixel((150, 150, 0), (count, 7)) # refresh counter
count += 1
continue
else:
count = 0
old_stat = stat
if stat == 0: # generator
try:
r = urequests.post("https://dashboard.eventinfra.org/api/datasources/proxy/1/render",
data='target=infra.ACT_PWR_1_generator_tot_kva&target=infra.ACT_PWR_2_generator_tot_kva&target=infra.ACT_PWR_3_generator_tot_kva&from=-3min&until=now&format=json&maxDataPoints=768')
except:
draw_error('req')
continue
if r.status_code == 200:
# rgb.clear()
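            # The render API returns a list of series, each with 'datapoints' as
            # [[value, timestamp], ...]; keep the most recent value of every series.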
try:
l = [str(int(i['datapoints'][-1][0])) for i in r.json()]
except:
draw_error('json')
continue
draw_text()
else:
draw_error(r.status_code)
elif stat == 1: # up/down link
try:
r = urequests.post("https://dashboard.eventinfra.org/api/datasources/proxy/1/render",
data='target=scale(scaleToSeconds(nonNegativeDerivative(net.kvm2.snmp.if_octets-eth3_300.tx),1),8)&target=scale(scaleToSeconds(nonNegativeDerivative(net.kvm2.snmp.if_octets-eth3_300.rx),1),8)&from=-5min&until=now&format=json&maxDataPoints=768')
except:
rgb.text("E req")
continue
if r.status_code == 200:
try:
l = [str(int(i['datapoints'][-1][0] / 1e6)) for i in r.json()]
except:
draw_error('json')
continue
draw_text()
else: # non 200 status code
draw_error(str(r.status_code))
```
|
{
"source": "JeroenvO/pulsedpowerplasmaplots",
"score": 3
}
|
#### File: analyze/scope_parse/c_get_lines.py
```python
import os
from analyze.scope_parse.a_easyscope_parser import parse_file
from analyze.scope_parse.b_correct_lines import correct_lines
from analyze.defines import *
def get_vol_cur_single(filename,
current_scaling = 0.5,
delay=0,
voltage_offset=None,
current_offset=None,
splitted_pulse=False):
"""
Parse voltage and current from waveforms.
:param filename: filepath without extension
:return: [time, v, i] of waveform
"""
line_objs = parse_file(filename) # file to parse
offsets = [
{'v_shift': delay, # -16 works fine for exact match of waveforms with different cable length. Otherwise 0
'div_zero': voltage_offset}, # if voltage has another div_zero than current
{'val_div_correct': current_scaling, # -100 for Pearson 0.1v/a inverted.
'div_zero': current_offset}
# {},
# {}
]
time_axis, y_axes = correct_lines(line_objs, offsets=offsets)
v = y_axes[0]
i = y_axes[1]
    # for a split pulse, disable these assertions.
assert MAX_VOLTAGE_MIN < max(v) < MAX_VOLTAGE_MAX, "Voltage max (%r) scaling incorrect!" % max(v)
assert MIN_VOLTAGE_MIN < min(v) < MIN_VOLTAGE_MAX, "Voltage min (%r) scaling incorrect!" % min(v)
if not MAX_CURRENT_MIN < max(i) < MAX_CURRENT_MAX: # max current between 2A and 30A
if max(i) < 0.03:
print("Warning!, scope current corrected for mV to V!")
i *= 1000
elif max(i) > 1000: # minimum 1A max
print("Warning!, scope current corrected for V to mV!")
i /= 1000
else:
raise Exception("Current scaling is incorrect! Max: "+str(max(i))+', Min: '+str(min(i)))
assert MAX_CURRENT_MIN < max(i) < MAX_CURRENT_MAX, "Current max (%r) scaling incorrect!" % max(i)
assert MIN_CURRENT_MIN < min(i) < MIN_CURRENT_MAX, "Current min (%r) scaling incorrect!" % min(i)
# assert i.argmax() < i.argmin(), 'Current valley before peak, signal is inverted!'
return [time_axis, v, i]
def get_vol_cur_dir(path):
"""
Get list of [[time, vol, cur], .. ] for each file in 'path'
:param path: search path
:return: list of lists with time, vol and cur.
"""
dir = os.listdir(path)
lines = []
for file in dir:
lines.append(get_vol_cur_single(path+'/'+file) + [file])
return lines
def get_vol_cur_multiple(base_filename, **kwargs):
"""
Used if multiple scope waveforms are captured per measurement.
These waveforms are all appended to the data in calc_run.py
It will be used in e_average.py to calculate average powers for pulses.
:param base_filename: base filename/path without extension or _.
:return: list of lists with [time, v, i] waveforms. One for each obtained waveform
"""
i = 0
lines = []
while True:
try:
# print(base_filename, i)
lines.append(get_vol_cur_single(base_filename+'_'+str(i),
**kwargs))
i += 1
except IOError:
break
except Exception as e:
raise Exception(e)
return lines
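# Example usage (sketch; the file paths are hypothetical). Scope captures are
# expected as '<base>_0', '<base>_1', ... without extension:
# waveforms = get_vol_cur_multiple('data/20180115/run1/scope/pulse', delay=-16)
# t, v, i = waveforms[0]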
```
#### File: visualize/final_v1/plot_pe.py
```python
import matplotlib.pyplot as plt
from visualize.helpers.plot import save_file, set_plot
from visualize.helpers.data import filter_data
def plot_pe(data, reactor):
"""
    Plot the power and energy waveforms in two subplots.
:param data:
:param reactor:
:return:
"""
data = filter_data(data, input_v_output=15e3, input_f=10, input_l=1)[0]
fig, ax = plt.subplots(2, 1, sharex=True)
x_axis = data['output_t'][0]*1e6
p_axis = data['output_p'][0]/1e3
e_axis = data['output_e'][0]*1e3
ax[0].plot(x_axis, p_axis, color='black')
ax[1].plot(x_axis, e_axis, color='black')
ax[0].set_ylabel('P [kW]')
ax[1].set_ylabel('E [mJ]')
set_plot(fig, 2, pulse=True)
save_file(fig, name='pe-'+reactor, path='G:/Prive/MIJN-Documenten/TU/62-Stage/05_python/plots_final_v1')
if __name__ == '__main__':
from visualize.helpers.data import load_pickle
data = load_pickle('20180115/run1')
plot_pe(data, 'short-glass')
```
#### File: final_v2/burst/plot_edens_yield_paper.py
```python
import matplotlib.pyplot as plt
import numpy as np
from visualize.helpers.colors import color_plasma
from visualize.helpers.data import filter_data, reactor_inducance_index
from visualize.helpers.plot import save_file, set_plot, set_unique_legend
from visualize.helpers.burst import calc_burst
def plot_edens_yield(datas):
"""
    Make various plots against energy density, one series per burst frequency.
:param datas:
:return:
"""
fig, ax = plt.subplots(2, 1, sharex=True)
m = 'o'
ui = np.array([200, 150, 100, 75, 50])
offset = 1 # skip bright yellow color
colors = color_plasma(len(ui)+offset)
# sort data, to keep the legend in the right order.
datas = sorted(datas, key=lambda x:x[0]['burst_inner_f'])
for i, data in enumerate(datas):
l = str(data[0]['burst_inner_f']) + ' kHz'
data = filter_data(data, input_v_output=15e3, output_yield_gkwh__gt=25)
c = colors[np.where(data[0]['burst_inner_f'] == ui)[0][0]+offset]
burstdata = calc_burst(data)
line = data[0] # because all data is the same in one burst run
edens = burstdata['output_energy_dens']
ax[1].scatter(edens, burstdata['output_yield_gkwh'], label=l, c=c, marker=m)
ax[0].scatter(edens, line['o3_ppm'], label=l, c=c, marker=m)
ax[1].set_ylabel('Yield [g/kWh]')
# ax[1].set_ylabel('Production [g/h]')
# ax_dens[1].set_ylim([0, 7e-5])
# ax_dens[2].set_ylim([0, 2e3])
# ax[0].set_ylim([0, 120])
ax[0].set_ylabel('Ozone [ppm]')
ax[0].text(20, 220, '50 Hz')
ax[0].text(45, 250, '100 Hz')
ax[0].text(85, 550, '200 Hz')
ax[1].set_xlabel('Energy density [J/l]')
set_unique_legend(ax[0])
set_plot(fig, plot_height=2, from_zero=False)
save_file(fig, name='edens-all-burst-paper', path='plots_final_v2/burst')
if __name__ == '__main__':
pass
```
#### File: final_v2/burst/plot_f_epulse.py
```python
import matplotlib.lines as mlines
import matplotlib.pyplot as plt
import numpy as np
from visualize.helpers.colors import color2
from visualize.helpers.data import filter_data, get_values
from visualize.helpers.plot import markers
from visualize.helpers.plot import save_file, set_plot, interpolate_plot
def plot_f_epulse(datas):
"""
    Plots the plasma energy per pulse within a burst (5 pulses, 100 Hz).
    :param datas:
:return:
"""
fig, ax = plt.subplots()
marker_legends = []
for i, data in enumerate(datas):
data = filter_data(data, input_v_output=15e3, input_l=1)
# l = str(data[0]['burst_inner_f']) + ' kHz, ' + str(data[0]['burst_pulses'])
c = color2[i]
center = get_values(data, 'output_e_plasma')*1000
all = [np.array(list)*1000 for list in get_values(data, 'output_e_plasma_single')]
x = range(1,1+len(data))
interpolate_plot(ax, x, center)
m = markers[i]
for j, line in enumerate(data):
epuls = line['output_e_plasma']*1000 # array of values, to mJ.
plt.scatter(j+1, epuls, c=c, marker=m)
marker_legends.append(
mlines.Line2D([], [], marker=m, label=str(len(data))+' pulses', color='grey', markerfacecolor=c, markeredgewidth=0))
# mi = [y2a - min(z2a) for z2a, y2a in zip(all, center)] # list of minima of y
# ma = [max(z2a) - y2a for z2a, y2a in zip(all, center)] # list of maxima of y
std = [np.std(z2a) for z2a in all]
ax.errorbar(x, center, yerr=std, xerr=None, ecolor=c, fmt='none', capsize=3)
# add x labels
ax.set_xlabel('Pulse number')
ax.set_ylabel('Pulse plasma energy [mJ]')
ax.legend(handles=marker_legends)
ax.set_xlim(left=0)
set_plot(fig)
save_file(fig, name='epulse-burst', path='plots_final_v2/burst')
```
#### File: final_v2/normal/plot_f_epulse.py
```python
import matplotlib.lines as mlines
import matplotlib.pyplot as plt
import numpy as np
from visualize.helpers.data import load_pickle, filter_data, get_values
from visualize.helpers.plot import save_file, set_plot
def plot_f_epulse(data, reactor):
"""
    Plots the energy per pulse for various frequencies as a boxplot.
:param data:
:param reactor:
:return:
"""
data = filter_data(data, input_v_output=15e3, input_l=1)
fig, ax = plt.subplots()
uf = np.unique(get_values(data, 'input_f'))
plotdata = []
for f in uf:
d = filter_data(data, input_f=f)
l = get_values(d, key='output_e_plasma_single') # returns list of arrays with values.
v = np.concatenate(l)
epuls = np.array(v)*1000 # array of values
plotdata.append(epuls)
plt.boxplot(plotdata)
# add x labels
num_boxes = len(uf)
plt.xticks(range(num_boxes+1), ['']+list(uf), rotation=45)
# add top x-labels with number of sample points
upperLabels = [''] + [str(len(s)) for s in plotdata]
ax2 = ax.twiny()
ax2.set_xlim(ax.get_xlim())
ax2.set_xticks(ax.get_xticks())
ax2.set_xticklabels(upperLabels)
ax2.set_xlabel("Number of samples")
ax.set_xlabel('Frequency [Hz]')
ax.set_ylabel('Pulse plasma energy [mJ]')
set_plot(fig, plot_height=1.4)
save_file(fig, name='epulse-'+reactor, path='plots_final_v2/normal')
if __name__ == '__main__':
reactors = ['long-glass-46uH', 'long-glass-26uH', 'short-glass-nocoil', 'short-glass-26uH', 'short-glass-8uH']
reactor = reactors[3]
print(reactor)
if reactor == 'long-glass-46uH':
data = load_pickle("20180115-def1/run6")
elif reactor == 'long-glass-26uH':
data = load_pickle("20180115-def1/run5")
elif reactor == 'short-glass-26uH':
data = load_pickle("20180115-def1/run2")
elif reactor == 'short-glass-8uH':
data = load_pickle("20180115-def1/run3")
elif reactor == 'short-glass-nocoil':
data = load_pickle("20180115-def1/run1")
else:
raise Exception("No input!")
plot_f_epulse(data, reactor)
```
#### File: visualize/helpers/burst.py
```python
import numpy as np
from visualize.helpers.data import get_values
def calc_burst(data):
"""
    Recalculate values from d_calc for a burst run where each measurement is one pulse from the burst.
:param data:
:return:
"""
burst = len(data)
freq = data[0]['burst_f']
print("Burst with "+str(burst) + ' pulses')
pulse_energy = np.average(get_values(data, key='output_e_plasma')) # average energy in each burst pulse
burst_energy = sum(get_values(data, key='output_e_plasma')) # sum energy in one burst of n pulses
output_p_plasma = freq * burst_energy
lss = np.average(get_values(data, 'airflow_ls'))
o3f = np.average(get_values(data, 'o3_gramsec'))
ppm = np.average(get_values(data, 'o3_ppm'))
input_p = np.average(get_values(data, 'input_p'))
dic ={
'e_plasma_burst': burst_energy,
'e_plasma_avg': pulse_energy,
'p_plasma': output_p_plasma,
'output_energy_dens': output_p_plasma / lss,
'output_yield_gj': o3f / output_p_plasma if output_p_plasma else 0,
'output_yield_gkwh': o3f / (output_p_plasma / 3.6e6) if output_p_plasma else 0,
'e_eff': output_p_plasma / input_p if output_p_plasma else 0,
'ppm': ppm,
}
return dic
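# Example usage (sketch): `data` holds the per-pulse dicts of one burst run,
# e.g. loaded with load_pickle('20180115-burst/run1'); the returned dict then
# aggregates plasma energy, power and ozone yield over the whole burst.
# summary = calc_burst(filter_data(data, input_v_output=15e3))
# print(summary['output_yield_gkwh'], summary['ppm'])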
```
#### File: visualize/helpers/data.py
```python
import operator
import os
import pickle
import numpy as np
from analyze.defines import *
def load_pickle(path):
if path[-4:] != '.pkl':
if path[-5] == '.' or path[-4] == '.':
# random file with extension
return None
elif path[-8:] != 'data.pkl':
path = path + '/data.pkl'
else:
path = path + '.pkl'
if not os.path.exists(path):
# path = 'G:/Prive/MIJN-Documenten/TU/62-Stage/' + path # try full path.
path = 'D:/ownCloud/DATA/MIJN-Documenten/TU/6-Stage/' + path
assert os.path.exists(path)
with open(path, 'rb') as f:
d = pickle.load(f)
assert any(d)
return d
def get_values(dicts, key):
"""
    Get all values for a given key from a list of dicts.
    Raises an AssertionError if the list is empty or the key is missing from the first dict.
    Takes one 'column' of the data, analogous to a column in the generated Excel file.
:param dicts: the list of dicts to search
:param key: the key to search each dict for
:return: list of values
"""
assert any(dicts)
assert key in dicts[0]
a = np.array([d[key] if key in d else 0 for d in dicts])
# assert any(a)
return a
def load_pickles(dir, filename='data.pkl'):
"""
Load pickles from all directories in a path.
:param dir: dir with subdirs which have data.pkl
:return: list of dicts with processed measure data
"""
data = []
if not os.path.exists(dir):
dir = 'D:/ownCloud/DATA/MIJN-Documenten/TU/6-Stage/' + dir # try full path.
dirs = os.listdir(dir, )
for tdir in dirs:
if os.path.isdir(dir+'/'+tdir):
try:
data += load_pickle(dir + '/' + tdir + '/' + filename)
except:
pass # invalid dir
assert any(data)
return data
def filter_data(data, **kwargs):
"""
    Filter a list of dicts on the given key=value pairs.
    Append '__<operator>' to a key to use a custom operator from the operator module (default 'eq').
:param data: data to filter, array of dicts from pickle file
:param kwargs: key=value, where key is key of dict and value is value to filter.
:return: filtered data
"""
assert any(data)
for key, value in kwargs.items():
key = key.split('__')
op = key[1] if len(key) == 2 else 'eq'
f = getattr(operator, op)
# only check data[0], assume all dicts have the same keys
assert key[0] in data[0], '%r is not found in dictionary!' % key[0]
if op in ['contains']: # reverse order of arguments for these ops.
data = [d for d in data if f(value, d[key[0]])]
else:
data = [d for d in data if f(d[key[0]], value)]
assert any(data), "Filter on key %r returned no data!" % str(key)
return data
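# Example (sketch): keep only measurements at 15 kV with a frequency above 100 Hz;
# '__gt' maps to operator.gt, while '__contains' reverses the argument order so
# it tests `d[key] in value` (membership of the stored value in what you pass).
# subset = filter_data(data, input_v_output=15e3, input_f__gt=100)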
def sort_data(data, key):
"""
Sort a list of dicts by a given key
:param data: input list of dicts
:param key: key to sort
:return: sorted list of dicts
"""
assert any(data)
return sorted(data, key=lambda k: k[key])
def reactor_inducance_index(reactor, inductance):
"""
Return an index based on combination of reactor and inductance
:param reactor:
:param inductance:
:return:
"""
if reactor == REACTOR_GLASS_SHORT_QUAD:
assert inductance in INDUCTANCE_SHORT_REACTOR
return INDUCTANCE_SHORT_REACTOR.index(inductance)
elif reactor == REACTOR_GLASS_LONG:
assert inductance in INDUCTANCE_LONG_REACTOR
return INDUCTANCE_LONG_REACTOR.index(inductance) + len(INDUCTANCE_SHORT_REACTOR)
else:
raise Exception("Invalid reactor!")
def annotate_data(data, **kwargs):
"""
Annotate each item in data with a key=value from kwargs
:param data: input list of dicts
:param kwargs: key=val to annotate
:return:
"""
for key, val in kwargs.items():
for i, line in enumerate(data):
data[i][key] = val
return data
```
#### File: poster/normal/plot_edens_yield.py
```python
import matplotlib.pyplot as plt
from visualize.helpers.colors import color_plasma_3
from visualize.helpers.data import filter_data, reactor_inducance_index
from visualize.helpers.plot import save_file, set_plot, set_unique_legend, markers
def plot_edens_yield(data):
"""
    Make various scatter plots against energy density.
    :param data:
:return:
"""
data = filter_data(data, input_v_output=15e3, input_l=1, output_yield_gkwh__gt=25)
fig, ax = plt.subplots(4, 1, sharex=True)
colors = color_plasma_3
# m = 'o'
# interpolate_plot(ax[0], x, get_values(data, 'output_yield_gkwh'))
# interpolate_plot(ax[1], x, get_values(data, 'o3_gramsec')*3600)
# interpolate_plot(ax[2], x, get_values(data, 'o3_ppm'))
# interpolate_plot(ax[3], x, get_values(data, 'input_p'))
# interpolate_plot(ax[3], x, get_values(data, 'output_p_avg'))
# interpolate_plot(ax[4], x, get_values(data, 'input_f'))
for line in data:
reactor = line['reactor']
inductance = line['inductance']
i = reactor_inducance_index(reactor, inductance)
l = reactor + ' ' + (str(inductance)+'$\,\mu H$' if inductance else 'no coil')
c = colors[i]
m = markers[i]
edens = line['output_energy_dens']
ax[0].scatter(edens, line['output_yield_gkwh'], label=l, c=c, marker=m)
# ax_freq[0].scatter(freq, line['input_yield_gkwh'])
ax[1].scatter(edens, line['o3_ppm'], label=l, c=c, marker=m)
ax[2].scatter(edens, line['e_eff']*100, label=l, c=c, marker=m)
ax[3].scatter(edens, line['input_f'], label=l, c=c, marker=m)
ax[0].set_ylabel('Yield [g/kWh]')
# ax[1].set_ylabel('Production [g/h]')
# ax_dens[1].set_ylim([0, 7e-5])
# ax_dens[2].set_ylim([0, 2e3])
# ax[0].set_ylim([0, 120])
ax[1].set_ylabel('Ozone [ppm]')
ax[2].set_ylabel('Energy efficiency [%]')
ax[3].set_ylabel('Frequency [Hz]')
ax[3].set_xlabel('Energy density [J/l]')
set_unique_legend(ax[1])
set_plot(fig, plot_height=3)
save_file(fig, name='edens-all', path='plots_poster/normal')
if __name__ == '__main__':
pass
```
|
{
"source": "jeroenvuurens/pipetorch",
"score": 2
}
|
#### File: pipetorch/data/imagecollection.py
```python
import pandas as pd
import numpy as np
import torch
import math
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset, random_split, Subset
import matplotlib.pyplot as plt
from torchvision.datasets import MNIST, ImageFolder, CIFAR10
from torchvision.transforms import transforms
import os
import matplotlib
import matplotlib.patheffects as PathEffects
from IPython.core import pylabtools
from pathlib import Path
import sys
from IPython import get_ipython
from tqdm.notebook import tqdm
import ipywidgets as widgets
import io
from PIL import Image, ImageStat
from getpass import getuser
from ..evaluate.evaluate import Evaluator
ipython = get_ipython()
back2gui = { b:g for g, b in pylabtools.backends.items() }
class plt_gui:
def __init__(self, gui):
self.gui = gui
def __enter__(self):
backend = matplotlib.get_backend()
self.old_gui = back2gui[backend]
ipython.magic('matplotlib ' + self.gui)
def __exit__(self, *args):
ipython.magic('matplotlib ' + self.old_gui)
class plt_inline(plt_gui):
def __init__(self):
super().__init__('inline')
class plt_notebook(plt_gui):
def __init__(self):
super().__init__('notebook')
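# Minimal fallback for the tensor-to-numpy conversion used by train_numpy and
# valid_numpy below; pipetorch presumably ships its own helper, so treat this
# as an assumption that merely keeps the file self-contained.
def to_numpy(arr):
    try:
        return arr.detach().cpu().numpy()
    except AttributeError:
        return np.asarray(arr)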
def subplots(rows, cols, imgsize=4, figsize=None, title=None, **kwargs):
"Like `plt.subplots` but with consistent axs shape, `kwargs` passed to `fig.suptitle` with `title`"
if figsize is None:
figsize = (imgsize*cols, imgsize*rows)
fig, axs = plt.subplots(rows,cols,figsize=figsize)
if rows==cols==1:
axs = [[axs]]
elif (rows==1 and cols!=1) or (cols==1 and rows!=1):
axs = [axs]
if title is not None:
fig.suptitle(title, **kwargs)
return np.array(axs)
def sample(self, device=None):
X, y = self.one_batch()
if device is not None:
return X.to(device), y.to(device)
return X, y
class ImageDataset(Dataset):
"""Image dataset."""
def __init__(self, *args, transform=None, **kwargs):
"""
Args:
transform (callable, optional): Optional transform to be applied
on a sample.
"""
        super().__init__(*args, **kwargs)
self.transform = transform
def __getitem__(self, idx):
        item = super().__getitem__(idx)
if self.transform:
item = self.transform(item)
return item
class image_databunch:
def __init__(self, train_ds, valid_ds, batch_size=32, valid_batch_size=None, shuffle=True, num_workers=0,
pin_memory=False, valid_pin_memory=None, normalized_mean=None, normalized_std=None,
classes=None, class_to_idx=None):
self.train_ds = train_ds
self.valid_ds = valid_ds
self.batch_size = batch_size
self.valid_batch_size = batch_size if valid_batch_size is None else valid_batch_size
self.valid_pin_memory = pin_memory if valid_pin_memory is None else valid_pin_memory
self.num_workers = num_workers
self.shuffle = shuffle
self.pin_memory = pin_memory
self.normalized_mean = normalized_mean
self.normalized_std = normalized_std
self.classes = classes
self.class_to_idx = class_to_idx
@staticmethod
def balance(X, y):
indices = [np.where(y==l)[0] for l in np.unique(y)]
classlengths = [len(i) for i in indices]
n = max(classlengths)
mask = np.hstack([np.random.choice(i, n-l, replace=True) for l,i in zip(classlengths, indices)])
indices = np.hstack([mask, range(len(y))])
return X[indices], y[indices]
def to(self, device):
try:
self.train_ds.data.to(device)
except: pass
try:
self.train_ds.targets.to(device)
except: pass
try:
self.valid_ds.data.to(device)
except: pass
try:
self.valid_ds.targets.to(device)
except: pass
self.device=device
return self
def cpu(self):
return self.to(torch.device('cpu'))
def gpu(self):
return self.to(torch.device('cuda:0'))
@property
def batch_size(self):
return self._batch_size
@batch_size.setter
def batch_size(self, value):
self._batch_size = min(value, len(self.train_ds))
self.reset()
@property
def num_workers(self):
return self._num_workers
@num_workers.setter
def num_workers(self, value):
self._num_workers = value
self.reset()
def evaluate(self, *metrics):
#assert len(metrics) > 0, 'You need to provide at least one metric for the evaluation'
return Evaluator(self, *metrics)
@property
def labels(self):
return self._labels
@property
def train_dl(self):
try:
return self._train_dl
except:
self._train_dl = DataLoader(self.train_ds, num_workers=self.num_workers, shuffle=self.shuffle, batch_size=self.batch_size, pin_memory=self.pin_memory)
return self._train_dl
@train_dl.setter
def train_dl(self, dl):
self._train_dl = dl
@property
def valid_dl(self):
try:
return self._valid_dl
except:
self._valid_dl = DataLoader(self.valid_ds, shuffle=False, num_workers=self.num_workers, batch_size=self.valid_batch_size, pin_memory=self.valid_pin_memory)
return self._valid_dl
@valid_dl.setter
def valid_dl(self, dl):
self._valid_dl = dl
@property
def train_X(self):
return self.train_ds.data
@property
def train_y(self):
return self.train_ds.targets
@property
def valid_X(self):
return self.valid_ds.data
@property
def valid_y(self):
return self.valid_ds.targets
@property
def train_numpy(self):
return to_numpy(self.train_X), to_numpy(self.train_y)
@property
def valid_numpy(self):
return to_numpy(self.valid_X), to_numpy(self.valid_y)
def sample(self, device=None):
X, y = next(iter(self.train_dl))
if device is not None:
return X.to(device), y.to(device)
return X, y
def reset(self):
try:
            del self._valid_dl
except: pass
try:
del self._train_dl
except: pass
def show_batch(self, rows=3, imgsize=(20,20), figsize=(10,10)):
with plt_inline():
old_backend = matplotlib.get_backend()
Xs, ys = next(iter(self.train_dl))
Xs = Xs[:rows*rows]
ys = ys[:rows*rows]
axs = subplots(rows, rows, imgsize=imgsize, figsize=figsize)
invnormalize = self.inv_normalize()
for x,y,ax in zip(Xs, ys, axs.flatten()):
x = x.cpu()
x = invnormalize(x)
im = transforms.ToPILImage()(x).convert("RGB")
im = transforms.Resize([100,100])(im)
ax.imshow(im)
try:
y = self.classes[y]
except: pass
ax.set_title(f'y={y}')
for ax in axs.flatten()[len(Xs):]:
ax.axis('off')
plt.tight_layout()
plt.show()
@classmethod
def get_transformations_train(cls, size=224, crop_size=None, crop_padding=None, color_jitter=None, rotate=None, do_flip=True, normalize_mean=None, normalize_std=None):
return cls.get_transformations(size=size, crop_size=crop_size, crop_padding=crop_padding, color_jitter=color_jitter, rotate=rotate, do_flip=do_flip, normalize_mean=normalize_mean, normalize_std=normalize_std)
@classmethod
def get_transformations(cls, size=224, crop_size=None, crop_padding=None, color_jitter=None, rotate=None, do_flip=None, normalize_mean=None, normalize_std=None):
t = []
if rotate is not None:
t.append(transforms.RandomRotation(rotate))
if color_jitter is not None:
t.append(transforms.ColorJitter(*color_jitter))
if crop_size is not None or crop_padding is not None:
if crop_size is None:
crop_size = size
if crop_padding is None:
crop_padding = 0
t.append(transforms.RandomCrop(crop_size, padding=crop_padding, pad_if_needed=True))
if size is not None:
t.append(transforms.Resize([size,size]))
if do_flip:
t.append(transforms.RandomHorizontalFlip())
t.append(transforms.ToTensor())
if normalize_mean is not None and normalize_std is not None:
t.append(transforms.Normalize(mean=normalize_mean, std=normalize_std))
return transforms.Compose( t )
def inv_normalize(self):
if self.normalized_std is not None and self.normalized_mean is not None:
return transforms.Normalize(mean=tuple(-m/s for m, s in zip(self.normalized_mean, self.normalized_std)), std=tuple(1/s for s in self.normalized_std))
try:
for l in self.train_ds.transform.transforms:
if type(l) == transforms.Normalize:
return transforms.Normalize(mean=tuple(-m/s for m, s in zip(l.mean, l.std)), std=tuple(1/s for s in l.std))
except:pass
try:
for l in self.train_ds.dataset.transform.transforms:
if type(l) == transforms.Normalize:
return transforms.Normalize(mean=tuple(-m/s for m, s in zip(l.mean, l.std)), std=tuple(1/s for s in l.std))
except:pass
return lambda x:x
@staticmethod
def tensor_ds(ds):
try:
ds1 = TransformableDataset(ds, transforms.ToTensor())
ds1[0][0].shape[0]
return ds1
except:
return ds
@staticmethod
def channels(ds):
return image_databunch.tensor_ds(ds)[0][0].shape[0]
@classmethod
def train_normalize(cls, ds):
ds = image_databunch.tensor_ds(ds)
channels = image_databunch.channels(ds)
total_mean = []
total_std = []
for c in range(channels):
s = torch.cat([X[c].view(-1) for X, y in ds])
total_mean.append(s.mean())
total_std.append(s.std())
return torch.tensor(total_mean), torch.tensor(total_std)
@classmethod
def from_image_folder(cls, path, valid_size=0.2, target_transform=None, size=224, crop_size=None, crop_padding=None, color_jitter=None, rotate=None, do_flip=None, normalize_mean=None, normalize_std=None, normalize=False, **kwargs):
ds = ImageFolder(root=path, target_transform=target_transform)
split = int((1-valid_size) * len(ds))
indices = list(range(len(ds)))
np.random.shuffle(indices)
train_idx, valid_idx = indices[:split], indices[split:]
if normalize:
assert normalize_mean is None and normalize_std is None, 'You cannot set normalize=True and give the mean or std'
normalize_mean, normalize_std = cls.train_normalize(Subset(ds, train_idx))
train_transforms = cls.get_transformations_train(size=size, crop_size=crop_size, crop_padding=crop_padding, color_jitter=color_jitter, rotate=rotate, do_flip=do_flip, normalize_mean=normalize_mean, normalize_std=normalize_std)
valid_transforms = cls.get_transformations(size=size, normalize_mean=normalize_mean, normalize_std=normalize_std)
train_ds = TransformableDataset(Subset(ds, train_idx), train_transforms)
valid_ds = TransformableDataset(Subset(ds, valid_idx), valid_transforms)
return cls(train_ds, valid_ds, classes=ds.classes, class_to_idx=ds.class_to_idx,
normalized_mean=normalize_mean, normalized_std=normalize_std, **kwargs)
@classmethod
def from_image_folders(cls, trainpath, validpath, size=None, transform=None, target_transform=None, **kwargs):
if type(transform) is int:
train_transforms = cls.get_transformations_train(size=transform)
valid_transforms = cls.get_transformations(size=transform)
elif type(transform) is dict:
train_transforms = cls.get_transformations_train(**transform)
valid_transforms = cls.get_transformations(**transform)
elif type(transform) is tuple:
train_transforms, valid_transforms = transform
elif transform is None:
train_transforms = transforms.Compose( [transforms.ToTensor()] )
valid_transforms = train_transforms
else:
train_transforms = transform
valid_transforms = transform
train_ds = ImageFolder(root=trainpath, transform=train_transforms, target_transform=target_transform)
valid_ds = ImageFolder(root=validpath, transform=valid_transforms, target_transform=target_transform)
return cls(train_ds, valid_ds, classes=train_ds.classes, class_to_idx=train_ds.class_to_idx, **kwargs)
class TransformableDataset(Dataset):
def __init__(self, dataset, transform=None):
self.dataset = dataset
self.transform = transform
def __getitem__(self, index):
x, y = self.dataset[index]
if self.transform:
x = self.transform(x)
return x, y
def __len__(self):
return len(self.dataset)
class Resize(object):
def __init__(self, size, interpolation=Image.BILINEAR):
self.size = size
self.interpolation = interpolation
def __call__(self, img):
old_size = img.size # old_size[0] is in (width, height) format
ratio = float(self.size)/min(old_size)
new_size = tuple([int(x * ratio) for x in old_size])
return img.resize(new_size, resample=self.interpolation)
class FastCIFAR(CIFAR10):
def __init__(self, root='/data/datasets/cifarnew/', train=True, transform=None, device=None, size=None, **kwargs):
super().__init__(root=root, train=train, **kwargs)
self.transform=transform
# Scale data to [0,1]
self.data = torch.tensor(self.data).float().div(255)
self.data = self.data.permute(0, 3, 1, 2)
if size is not None:
            self.data = F.interpolate(self.data, (size, size))  # data is (N, 3, H, W) after permute; resize the spatial dims only
        # Normalize with per-channel mean and std
self.data[:,0] = self.data[:,0].sub_(0.4057).div_(0.2039)
self.data[:,1] = self.data[:,1].sub_(0.5112).div_(0.2372)
self.data[:,2] = self.data[:,2].sub_(0.5245).div_(0.3238)
self.targets = torch.tensor(self.targets)
# Put both data and targets on GPU in advance
if device is not None:
self.data, self.targets = self.data.to(device), self.targets.to(device)
def __getitem__(self, index):
img, target = self.data[index], self.targets[index]
if self.transform:
img = self.transform(img)
return img, target
class FastMNIST(MNIST):
def __init__(self, *args, transform=None, device=torch.device('cuda:0'), size=None, **kwargs):
super().__init__(*args, **kwargs)
self.transform=transform
# Scale data to [0,1]
self.data = self.data.unsqueeze(1).float().div(255)
if size is not None:
self.data = F.interpolate(self.data, (size, size))
# Normalize it with the usual MNIST mean and std
self.data = self.data.sub_(0.1307).div_(0.3081)
# Put both data and targets on GPU in advance
if device is not None:
self.data, self.targets = self.data.to(device), self.targets.to(device)
def __getitem__(self, index):
img, target = self.data[index], self.targets[index]
if self.transform:
img = self.transform(img)
return img, target
class FastMNIST3(FastMNIST):
def __init__(self, *args, transform=None, device=torch.device('cuda:0'), size=None, **kwargs):
        super().__init__(*args, transform=transform, device=device, **kwargs)
self.size = size
def __getitem__(self, index):
img, target = self.data[index], self.targets[index]
if self.size is not None:
img = F.interpolate(img.unsqueeze(0), (self.size, self.size)).squeeze(0)
if self.transform:
img = self.transform(img)
img = torch.cat([img, img, img], axis=0)
return img, target
def mnist(path='/data/datasets/mnist2', batch_size=64, transform=None, size=None, **kwargs):
'''
    returns an image_databunch of the mnist dataset in greyscale (shape is (1, 28, 28)).
path: folder where the mnist dataset is, or will be downloaded to if it does not exist
batch_size (64): batch_size used for segmenting for training.
transform (None): pipeline of transformations that are applied to the images
size (None): resizes the images to (size, size).
'''
train_ds = FastMNIST(path, transform=transform, train=True, size=size, **kwargs)
valid_ds = FastMNIST(path, transform=transform, train=False, size=size, **kwargs)
db = image_databunch(train_ds, valid_ds, batch_size=batch_size,
normalized_mean=(0.1307,), normalized_std=(0.3081,))
return db
def mnist3(path='/data/datasets/mnist2', batch_size=64, size=None, transform=None, **kwargs):
'''
    returns an image_databunch of the mnist dataset in rgb (shape is (3, 28, 28)).
path: folder where the mnist dataset is, or will be downloaded to if it does not exist
batch_size (64): batch_size used for segmenting for training.
transform (None): pipeline of transformations that are applied to the images
size (None): resizes the images to (size, size).
'''
train_ds = FastMNIST3(path, transform=transform, train=True, size=size, **kwargs)
valid_ds = FastMNIST3(path, transform=transform, train=False, size=size, **kwargs)
db = image_databunch(train_ds, valid_ds, batch_size=batch_size,
normalized_mean=(0.1307, 0.1307, 0.1307), normalized_std=(0.3081, 0.3081, 0.3081))
return db
def cifar(path='/data/datasets/cifarnew/', batch_size=64, size=None, transform=None, **kwargs):
train_ds = FastCIFAR(root=path, transform=transform, train=True, size=size, **kwargs)
valid_ds = FastCIFAR(root=path, transform=transform, train=False, size=size, **kwargs)
db = image_databunch(train_ds, valid_ds, batch_size=batch_size,
normalized_mean=(0.4057, 0.5112, 0.5245), normalized_std=(0.2039, 0.2372, 0.3238))
return db
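# Illustrative usage sketch (not part of the original module): how the databunch
# constructors above are typically combined with show_batch and sample. The helper
# name, batch size and the choice of mnist() are hypothetical; it assumes the default
# dataset path and device used by FastMNIST are available.
def _databunch_usage_example():
    """Builds an MNIST databunch, shows a batch and returns the shape of a sample."""
    db = mnist(batch_size=128)   # FastMNIST train/valid wrapped in an image_databunch
    db.show_batch(rows=3)        # visual sanity check of a few training images
    X, y = db.sample()           # one (X, y) batch from the training DataLoader
    return X.shape, y.shape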
def create_path(p, mode=0o777):
path = Path(p)
os.makedirs(path, mode, exist_ok=True)
return path
def image_folder():
return f'/tmp/{getuser()}/images'
def _gis_args(keywords, output_directory=None,
image_directory=None, limit=200, format='jpg', color_type='full-color',
size='medium', type='photo', delay=0, **kwargs):
if output_directory is None:
output_directory = str(create_path(image_folder()))
if image_directory is None:
image_directory = '_'.join(keywords.split())
arguments = {"keywords":keywords,
"limit":limit, "format":format, "color_type":color_type, "size":size, "type":type,
"delay":delay, "image_directory":image_directory,
"output_directory":output_directory, "chromedriver":"/usr/bin/chromedriver" }
arguments.update(kwargs)
return arguments
def crawl_images(keywords, output_directory=None,
image_directory=None, limit=200, format='jpg', color_type='full-color',
size='medium', type='photo', delay=0, **kwargs):
"""
Downloads images through Google Image Search,
see https://google-images-download.readthedocs.io/en/latest/arguments.html
for info on the arguments.
keywords: the keywords passed to google image search to retrieve images
    limit: maximum number of images to retrieve (default=200). You will usually receive fewer images because some links do not work
output_directory: base folder for the downloads (default: /tmp/username/images/)
image_directory: subpath to store the images for this query (by default uses the query name)
format: compression type of photos that are downloaded (default='jpg')
color-type: default='full-color', see https://google-images-download.readthedocs.io/en/latest/arguments.html
size: default='medium', see https://google-images-download.readthedocs.io/en/latest/arguments.html
type: default='photo', see https://google-images-download.readthedocs.io/en/latest/arguments.html
delay: default=0, to pause between downloads, see https://google-images-download.readthedocs.io/en/latest/arguments.html
kwargs: any additional arguments that google-images-download accepts.
"""
try:
from .google_images_download import googleimagesdownload
except:
        raise NotImplementedError('Need google_images_download for this')
kwargs = _gis_args(keywords, output_directory=output_directory, image_directory=image_directory,
limit=limit, format=format, color_type=color_type, size=size, type=type, delay=delay,
**kwargs)
response = googleimagesdownload() #class instantiation
paths = response.download(kwargs) #passing the arguments to the function
def filter_images(keywords, folder=None, columns=4, height=200, width=200):
"""
Removes duplicate images and shows the remaining images so that the user can manually select
images to remove from the folder by pressing the DELETE button below.
keywords: subfolder of 'folder' in which the images are stored
folder: folder/output_directory where the crawled images are stored (e.g. /tmp/username/images)
columns (4): number of images displayed per row
height (200): height of the images in pixels
width (200): width of the images in pixels
"""
def on_click(button):
for r in rows:
if type(r) is widgets.HBox:
for c in r.children:
checkbox = c.children[1]
if checkbox.value:
print(checkbox.description_tooltip)
os.remove(checkbox.description_tooltip)
if folder is None:
folder = Path(image_folder())
keywords = '_'.join(keywords.split())
imagefiles = [f for f in folder.glob(keywords + '/*')]
rows = []
cols = []
bymean = {}
for i, imgfile in enumerate(tqdm(imagefiles)):
row = i // columns
col = i % columns
img = Image.open(imgfile)
m = hash(tuple(ImageStat.Stat(img).mean))
buff = io.BytesIO()
img.save(buff, format='JPEG')
if m in bymean:
os.remove(imgfile)
else:
bymean[m] = imgfile
image = widgets.Image( value=buff.getvalue(), width=width, height=height )
button = widgets.Checkbox( description='Delete', description_tooltip = str(imgfile) )
box = widgets.VBox([image, button])
cols.append(box)
if len(cols) == columns:
rows.append(widgets.HBox(cols))
cols = []
if len(cols) > 0:
rows.append(widgets.HBox(cols))
button = widgets.Button( description='Delete' )
button.on_click(on_click)
rows.append(button)
return widgets.VBox(rows)
```
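A short standalone sketch may help to see how the pieces of `image_databunch` above fit together. The import path, folder layout and parameter values below are assumptions for illustration only; adjust them to the actual package layout and data.

```python
# Hypothetical end-to-end use of image_databunch.from_image_folder.
# Assumes the module is importable as shown and that /tmp/flowers contains
# one sub-folder per class filled with images.
from pipetorch.data import image_databunch   # assumed import path

db = image_databunch.from_image_folder(
    '/tmp/flowers',     # root folder with one sub-folder per class
    valid_size=0.2,     # 20% of the images go to the validation split
    size=224,           # resize every image to 224x224
    normalize=True,     # estimate per-channel mean/std on the train split
    batch_size=32)

print(db.classes)       # class names taken from the sub-folder names
X, y = db.sample()      # one batch from the training DataLoader
print(X.shape, y.shape)
```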
#### File: pipetorch/data/ptdataset.py
```python
import numpy as np
import pandas as pd
from pandas.core.groupby.generic import DataFrameGroupBy, SeriesGroupBy
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
import matplotlib.pyplot as plt
from sklearn.utils import resample
import copy
import os
def to_numpy(arr):
try:
return arr.data.cpu().numpy()
except: pass
try:
return arr.to_numpy()
except: pass
return arr
class PTDS:
_metadata = ['_df', '_dfindices', '_pt_categoryx', '_pt_categoryy', '_pt_dummiesx', '_pt_dummiesy', '_pt_columny', '_pt_columnx', '_pt_transposey', '_pt_bias', '_pt_polynomials', '_pt_dtype', '_pt_sequence_window', '_pt_sequence_shift_y', '_pt_is_test']
_internal_names = pd.DataFrame._internal_names + ["_pt__indices", "_pt__x_sequence"]
_internal_names_set = set(_internal_names)
def to_ptdataframe(self):
cls = self._df.__class__
r = cls(self)
r._pt_columnx = self._pt_columnx
r._pt_columny = self._pt_columny
r._pt_transposey = self._pt_transposey
r._pt_bias = self._pt_bias
r._pt_polynomials = self._pt_polynomials
r._pt_sequence_window = self._pt_sequence_window
r._pt_sequence_shift_y = self._pt_sequence_shift_y
r._pt__train = self
r._pt__full = self
r._pt__valid = None
r._pt__test = None
r._pt_indices = list(range(len(self)))
r._pt__train_indices = r._pt_indices
r._pt__valid_indices = []
r._pt__test_indices = []
r._pt_split = None
r._pt_random_state = None
r._pt_balance = None
r._pt_shuffle = False
return r
def _copy_meta(self, r):
r._df = self._df
r._dfindices = self._dfindices
r._pt_categoryx = self._pt_categoryx
r._pt_categoryy = self._pt_categoryy
r._pt_dummiesx = self._pt_dummiesx
r._pt_dummiesy = self._pt_dummiesy
r._pt_columny = self._pt_columny
r._pt_columnx = self._pt_columnx
r._pt_is_test = self._pt_is_test
r._pt_transposey = self._pt_transposey
r._pt_polynomials = self._pt_polynomials
r._pt_bias = self._pt_bias
r._pt_dtype = self._pt_dtype
r._pt_sequence_window = self._pt_sequence_window
r._pt_sequence_shift_y = self._pt_sequence_shift_y
return r
def _ptdataset(self, data):
return self._copy_meta( PTDataSet(data) )
def _not_nan(self, a):
a = np.isnan(a)
while len(a.shape) > 1:
a = np.any(a, -1)
return np.where(~a)[0]
@property
def _dtype(self):
return self._pt_dtype
@property
def indices(self):
try:
return self._pt__indices
except:
if self._pt_is_test:
self._pt__indices = self._not_nan(self._x_sequence)
else:
s = set(self._not_nan(self._y_transposed))
self._pt__indices = [ i for i in self._not_nan(self._x_sequence) if i in s]
return self._pt__indices
@property
def _scalerx(self):
return self._df._scalerx
@property
def _scalery(self):
return self._df._scalery
@property
def _categoryx(self):
return self._pt_categoryx()
@property
def _categoryy(self):
return self._pt_categoryy()
@property
def _dummiesx(self):
return self._pt_dummiesx()
@property
def _dummiesy(self):
return self._pt_dummiesy()
@property
def _shift_y(self):
if self._pt_sequence_shift_y is not None:
return self._pt_sequence_shift_y
else:
return 0
@property
def _sequence_window(self):
try:
if self._is_sequence:
return self._pt_sequence_window
except:pass
return 1
@property
def _sequence_index_y(self):
return self._pt_sequence_window+self._shift_y-1
@property
def _columny(self):
return [ self.columns[-1] ] if self._pt_columny is None else self._pt_columny
@property
def _transposey(self):
return True if self._pt_transposey is None else self._pt_transposey
@property
def _columnx(self):
if self._pt_columnx is None:
return [ c for c in self.columns if c not in self._columny ]
return self._pt_columnx
@property
def _polynomials(self):
return self._pt_polynomials
@property
def _bias(self):
return self._pt_bias
def _transform(self, scalers, array):
out = []
for i, scaler in enumerate(scalers):
if scaler is not None:
out.append(scaler.transform(array[:, i:i+1]))
else:
out.append(array[:, i:i+1])
return np.concatenate(out, axis=1)
def resample_rows(self, n=True):
r = self._ptdataset(self)
if n == True:
n = len(r)
if n < 1:
n = n * len(r)
return r.iloc[resample(list(range(len(r))), n_samples = int(n))]
def interpolate_factor(self, factor=2, sortcolumn=None):
if not sortcolumn:
sortcolumn = self.columns[0]
df = self.sort_values(by=sortcolumn)
for i in range(factor):
i = df.rolling(2).sum()[1:] / 2.0
df = pd.concat([df, i], axis=0)
df = df.sort_values(by=sortcolumn)
return self._df._ptdataset(df).reset_index(drop=True)
@property
def _x_category(self):
if self._is_sequence:
self = self.iloc[:-self._shift_y]
if self._categoryx is None:
return self[self._columnx]
r = copy.copy(self[self._columnx])
for c, cat in zip(r._columnx, r._categoryx):
if cat is not None:
r[c] = cat.transform(r[c])
return r
@property
def _x_dummies(self):
if self._dummiesx is None:
return self._x_category
r = copy.copy(self._x_category)
r1 = []
for d, onehot in zip(r._columnx, r._dummiesx):
if onehot is not None:
a = onehot.transform(r[[d]])
r1.append( pd.DataFrame(a.toarray(), columns=onehot.get_feature_names_out([d])) )
r = r.drop(columns = d)
r1.insert(0, r.reset_index(drop=True))
r = pd.concat(r1, axis=1)
return r
@property
def _x_numpy(self):
return self._x_dummies.to_numpy()
@property
def _x_polynomials(self):
try:
return self._polynomials.fit_transform(self._x_numpy)
except:
return self._x_numpy
@property
def _x_scaled(self):
if len(self) > 0:
return self._transform(self._scalerx, self._x_polynomials)
return self._x_polynomials
@property
def _x_biased(self):
a = self._x_scaled
if self._bias:
return np.concatenate([np.ones((len(a),1)), a], axis=1)
return a
@property
def _x_sequence(self):
try:
return self._pt__x_sequence
except:
if not self._is_sequence:
self._pt__x_sequence = self._x_biased
else:
X = self._x_biased
window = self._sequence_window
len_seq_mode = max(0, len(X) - window + 1)
self._pt__x_sequence = np.concatenate([np.expand_dims(X[ii:ii+window], axis=0) for ii in range(len_seq_mode)], axis=0)
return self._pt__x_sequence
@property
def X(self):
return self._x_sequence[self.indices]
@property
def X_tensor(self):
import torch
if self._dtype is None:
return torch.tensor(self.X).type(torch.FloatTensor)
else:
return torch.tensor(self.X)
@property
def y_tensor(self):
import torch
if self._dtype is None:
return torch.tensor(self.y).type(torch.FloatTensor)
else:
return torch.tensor(self.y)
@property
def _is_sequence(self):
return self._pt_sequence_window is not None
@property
def tensors(self):
return self.X_tensor, self.y_tensor
@property
def _range_y(self):
stop = len(self) if self._shift_y >= 0 else len(self) + self._shift_y
start = min(stop, self._sequence_window + self._shift_y - 1)
return slice(start, stop)
@property
def _y_category(self):
if self._is_sequence:
self = self.iloc[self._range_y]
if self._categoryy is None:
return self[self._columny]
r = copy.copy(self[self._columny])
        for c, cat in zip(r._columny, r._categoryy):
            if cat is not None:
                r[c] = cat.transform(r[c])
return r
@property
def _y_dummies(self):
if self._dummiesy is None:
return self._y_category
r = copy.copy(self._y_category)
r1 = []
for d, onehot in zip(r._columny, r._dummiesy):
if onehot is not None:
a = onehot.transform(r[[d]])
r1.append( pd.DataFrame(a.toarray(), columns=onehot.get_feature_names_out([d])) )
r = r.drop(columns = d)
r1.insert(0, r.reset_index(drop=True))
r = pd.concat(r1, axis=1)
return r
@property
def _y_numpy(self):
return self._y_dummies.to_numpy()
@property
def _y_scaled(self):
if len(self) > 0:
return self._transform(self._scalery, self._y_numpy)
return self._y_numpy
@property
def _y_transposed(self):
return self._y_scaled.squeeze() if self._transposey else self._y_scaled
@property
def y(self):
return self._y_transposed[self.indices]
def replace_y(self, new_y):
y_pred = self._predict(new_y)
offset = self._range_y.start
indices = [ i + offset for i in self.indices ]
assert len(y_pred) == len(indices), f'The number of predictions ({len(y_pred)}) does not match the number of samples ({len(indices)})'
r = copy.deepcopy(self)
r[self._columny] = np.NaN
columns = [r.columns.get_loc(c) for c in self._columny]
r.iloc[indices, columns] = y_pred.values
return r
def to_dataset(self):
"""
        returns: a PyTorch TensorDataset built from this DataSet's X and y tensors,
        so that it can be wrapped in a DataLoader. X contains the (transformed)
        feature columns and y the target column(s) of this DataSet.
"""
from torch.utils.data import TensorDataset
return TensorDataset(*self.tensors)
def _predict_y(self, predict):
if not callable(predict):
return predict
try:
from torch import nn
import torch
with torch.set_grad_enabled(False):
return to_numpy(predict(self.X_tensor)).reshape(len(self))
        except: pass
        try:
            return predict(self.X).reshape(len(self))
        except: pass
        raise ValueError('predict must be a function that works on Numpy arrays or PyTorch tensors')
def _predict(self, predict):
return self.inverse_transform_y(self._predict_y(predict))
def predict(self, predict, drop=True):
y_pred = self._predict_y(predict)
if drop:
return self._df.inverse_transform(self.X, y_pred)
return self._df.inverse_transform(self.X, self.y, y_pred)
def add_column(self, y_pred, *columns):
y_pred = to_numpy(y_pred)
offset = self._range_y.start
indices = [ i + offset for i in self.indices ]
assert len(y_pred) == len(indices), f'The number of predictions ({len(y_pred)}) does not match the number of samples ({len(indices)})'
r = copy.deepcopy(self)
y_pred = self.inverse_transform_y(y_pred)
if len(columns) == 0:
columns = [ c + '_pred' for c in self._columny ]
for c in columns:
r[c] = np.NaN
columns = [r.columns.get_loc(c) for c in columns]
r.iloc[indices, columns] = y_pred.values
return r
def inverse_transform_y(self, y_pred):
return self._df.inverse_transform_y(y_pred)
def line(self, x=None, y=None, xlabel = None, ylabel = None, title = None, **kwargs ):
self._df.evaluate().line(x=x, y=y, xlabel=xlabel, ylabel=ylabel, title=title, df=self, **kwargs)
def scatter(self, x=None, y=None, xlabel = None, ylabel = None, title = None, **kwargs ):
self._df.evaluate().scatter(x=x, y=y, xlabel=xlabel, ylabel=ylabel, title=title, df=self, **kwargs)
def scatter2d_class(self, x1=None, x2=None, y=None, xlabel=None, ylabel=None, title=None, loc='upper right', noise=0, **kwargs):
self._df.evaluate().scatter2d_class(x1=x1, x2=x2, y=y, xlabel=xlabel, ylabel=ylabel, title=title, loc=loc, noise=noise, df=self, **kwargs)
def scatter2d_color(self, x1=None, x2=None, c=None, xlabel=None, ylabel=None, title=None, noise=0, **kwargs):
self._df.evaluate().scatter2d_color(x1=x1, x2=x2, c=c, xlabel=xlabel, ylabel=ylabel, title=title, noise=noise, df=self, **kwargs)
def scatter2d_size(self, x1=None, x2=None, s=None, xlabel=None, ylabel=None, title=None, noise=0, **kwargs):
self._df.evaluate().scatter2d_size(x1=x1, x2=x2, s=s, xlabel=xlabel, ylabel=ylabel, title=title, noise=noise, df=self, **kwargs)
def plot_boundary(self, predict):
self._df.evaluate().plot_boundary(predict)
def plot_contour(self, predict):
self._df.evaluate().plot_contour(predict)
class PTDataSet(pd.DataFrame, PTDS):
_metadata = PTDS._metadata
_internal_names = PTDS._internal_names
_internal_names_set = PTDS._internal_names_set
@property
def _constructor(self):
return PTDataSet
@classmethod
def from_ptdataframe(cls, data, df, dfindices):
r = cls(data)
r._df = df
r._dfindices = dfindices
r._pt_categoryx = df._categoryx
r._pt_categoryy = df._categoryy
r._pt_dummiesx = df._dummiesx
r._pt_dummiesy = df._dummiesy
r._pt_columny = df._columny
r._pt_columnx = df._columnx
r._pt_transposey = df._transposey
r._pt_polynomials = df._pt_polynomials
r._pt_bias = df._pt_bias
r._pt_dtype = df._pt_dtype
r._pt_is_test = False
r._pt_sequence_window = df._pt_sequence_window
r._pt_sequence_shift_y = df._pt_sequence_shift_y
return r
@classmethod
def df_to_testset(cls, data, df, dfindices):
r = cls.from_ptdataframe(data, df, dfindices)
r._pt_is_test = True
return r
def groupby(self, by, axis=0, level=None, as_index=True, sort=True, group_keys=True, observed=False, dropna=True):
r = super().groupby(by, axis=axis, level=level, as_index=as_index, sort=sort, group_keys=group_keys, observed=observed, dropna=dropna)
return self._copy_meta( PTGroupedDataSet(r) )
class PTGroupedDataSetSeries(SeriesGroupBy, PTDS):
_metadata = PTDS._metadata
#_internal_names = PTDS._internal_names
#_internal_names_set = PTDS._internal_names_set
@property
def _constructor(self):
return PTGroupedDataSetSeries
@property
def _constructor_expanddim(self):
return PTGroupedDataFrame
class PTGroupedDataSet(DataFrameGroupBy, PTDS):
_metadata = PTDS._metadata
#_internal_names = PTDS._internal_names
#_internal_names_set = PTDS._internal_names_set
def __init__(self, data=None):
super().__init__(obj=data.obj, keys=data.keys, axis=data.axis, level=data.level, grouper=data.grouper, exclusions=data.exclusions,
selection=data._selection, as_index=data.as_index, sort=data.sort, group_keys=data.group_keys,
observed=data.observed, mutated=data.mutated, dropna=data.dropna)
@property
def _constructor(self):
return PTGroupedDataSet
@property
def _constructor_sliced(self):
return PTGroupedDataSetSeries
def __iter__(self):
for group, subset in super().__iter__():
yield group, self._copy_meta(subset)
def astype(self, dtype, copy=True, errors='raise'):
        return PTDataSet.astype(self, dtype, copy=copy, errors=errors)
def get_group(self, name, obj=None):
return self._ptdataset( super().get_group(name, obj=obj) )
def to_dataset(self):
from torch.utils.data import ConcatDataset
dss = []
for key, group in self:
dss.append( group.to_dataset())
return ConcatDataset(dss)
```
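The windowing done by `_x_sequence` and `_range_y` is the least obvious part of this file. The NumPy sketch below reproduces the same indexing outside the class, with hypothetical window and shift values, purely as an illustration of the idea rather than a call into the API above.

```python
import numpy as np

# Hypothetical data: 6 time steps with 2 features each, and one target per step.
X = np.arange(12, dtype=float).reshape(6, 2)
y = np.arange(6, dtype=float)

window = 3    # plays the role of _pt_sequence_window
shift_y = 1   # plays the role of _pt_sequence_shift_y

# Same construction as _x_sequence: one window of `window` consecutive rows per sample.
n_windows = max(0, len(X) - window + 1)
X_seq = np.concatenate(
    [np.expand_dims(X[i:i + window], axis=0) for i in range(n_windows)], axis=0)

# Same slice as _range_y: targets aligned with the end of each window,
# shifted shift_y steps into the future.
stop = len(y) if shift_y >= 0 else len(y) + shift_y
start = min(stop, window + shift_y - 1)
y_seq = y[start:stop]

print(X_seq.shape)   # (4, 3, 2): four windows of three time steps
print(y_seq)         # [3. 4. 5.]: window i is paired with y_seq[i]; the final window
                     # has no future target and is dropped later via `indices`
```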
#### File: pipetorch/model/perceptron.py
```python
from torchvision.models import *
import torch
import torch.nn as nn
identity=lambda x:x
class SingleLayerPerceptron(nn.Module):
def __init__(self, input, output, last_activation=identity):
super().__init__()
self.w1 = nn.Linear(input, output)
self.a1 = last_activation
def forward(self, x):
return self.a1(self.w1(x))
#return pred_y.view(-1)
class SingleLayerPerceptron_BinaryClass(SingleLayerPerceptron):
def __init__(self, input, output):
super().__init__(input, output, nn.Sigmoid())
def post_forward(self, y):
return (y > 0.5).float()
class SingleLayerPerceptron_MultiClass(SingleLayerPerceptron):
def __init__(self, input, output):
super().__init__(input, output, nn.LogSoftmax(dim=1))
def flatten_r_image(x):
return x[:,0,:,:].view(x.shape[0], -1)
class MultiLayerPerceptron(nn.Module):
def __init__(self, *width, preprocess=identity, inner_activation=nn.ReLU(), drop_prob=None, last_activation=None):
super().__init__()
self.actions = [preprocess]
for n, (i, o) in enumerate(zip(width[:-1], width[1:])):
l = nn.Linear(i, o)
self.actions.append(l)
self.__setattr__(f'w{n+1}', l)
if n < len(width) - 2:
if drop_prob is not None:
self.actions.append(nn.Dropout(p=drop_prob))
self.__setattr__(f'drop{n+1}', self.actions[-1])
self.actions.append(inner_activation)
self.__setattr__(f'activation{n+1}', self.actions[-1])
elif last_activation is not None:
self.actions.append(last_activation)
self.__setattr__(f'activation{n+1}', self.actions[-1])
#if width[-1] == 1:
# self.reshape = (-1)
#else:
# self.reshape = (-1, width[-1])
def forward(self, x):
for a in self.actions:
x = a(x)
return x #.view(self.reshape)
class MultiLayerPerceptron_BinaryClass(MultiLayerPerceptron):
def __init__(self, *width, preprocess=identity, inner_activation=nn.ReLU(), drop_prob=None):
        super().__init__(*width, preprocess=preprocess, inner_activation=inner_activation, drop_prob=drop_prob, last_activation=nn.Sigmoid())
def post_forward(self, y):
return (y > 0.5).float()
class MultiLayerPerceptron_MultiClass(MultiLayerPerceptron):
def __init__(self, *width, preprocess=identity, inner_activation=nn.ReLU(), drop_prob=None):
super().__init__(*width, preprocess=preprocess, inner_activation=inner_activation, drop_prob=drop_prob)
def post_forward(self, y):
return torch.argmax(y, axis=1)
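# Illustrative sketch (not part of the original module): flatten_r_image is intended as
# the `preprocess` step of a perceptron when the input is a 3-channel image batch, such
# as the rgb MNIST variant. The widths, batch size and helper name are hypothetical.
def _mlp_image_example():
    """Runs a random (8, 3, 28, 28) batch through a multi-class MLP that keeps only
    the red channel and flattens it to 784 features."""
    model = MultiLayerPerceptron_MultiClass(784, 100, 10, preprocess=flatten_r_image)
    X = torch.randn(8, 3, 28, 28)
    logits = model(X)                    # shape (8, 10)
    labels = model.post_forward(logits)  # one predicted class index per sample
    return logits.shape, labels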
class TwoLayerPerceptron(nn.Module):
def __init__(self, input, hidden, output, last_activation=None):
super().__init__()
self.w1 = nn.Linear(input, hidden)
self.a1 = nn.ReLU()
self.w2 = nn.Linear(hidden, output)
        self.a2 = last_activation if last_activation is not None else identity
def forward(self, x):
x = self.a1(self.w1(x))
pred_y = self.a2(self.w2(x))
return pred_y #.view(-1)
def post_forward(self, y):
return y
class TwoLayerPerceptron_BinaryClass(TwoLayerPerceptron):
def __init__(self, input, hidden, output):
super().__init__(input, hidden, output, last_activation=nn.Sigmoid())
def post_forward(self, y):
return (y > 0.5).float()
class TwoLayerPerceptron_MultiClass(TwoLayerPerceptron):
def __init__(self, input, hidden, output):
super().__init__(input, hidden, output, last_activation=nn.LogSoftmax(dim=1))
def zero_embedding(rows, columns):
e = nn.Embedding(rows, columns)
e.weight.data.zero_()
return e
class factorization(nn.Module):
def __init__(self, n_users, n_items, n_factors=20):
super().__init__()
self.user_factors = nn.Embedding( n_users,n_factors)
self.item_factors = nn.Embedding( n_items,n_factors)
self.user_bias = zero_embedding( n_users, 1)
self.item_bias = zero_embedding( n_items, 1)
self.fc = nn.Linear(n_factors, 4)
def forward(self, X):
user = X[:,0] - 1
item = X[:,1] - 1
return (self.user_factors(user) * self.item_factors(item)).sum(1) + self.user_bias(user).squeeze() + self.item_bias(item).squeeze()
```
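The `factorization` model at the end of this file is easiest to understand from a small standalone example. The sizes and the import path below are assumptions for illustration.

```python
import torch
from pipetorch.model.perceptron import factorization   # assumed import path

# Hypothetical sizes: 100 users, 50 items, 20 latent factors.
model = factorization(n_users=100, n_items=50, n_factors=20)

# Each row is a (user id, item id) pair; ids are 1-based, the forward subtracts 1.
X = torch.tensor([[1, 3],
                  [7, 12]])
pred = model(X)        # one predicted rating per (user, item) pair
print(pred.shape)      # torch.Size([2])
```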
#### File: pipetorch/train/helper.py
```python
import torch
import os
import matplotlib
from matplotlib import pyplot as plt
import matplotlib.patheffects as PathEffects
from IPython.core import pylabtools as pt
from pathlib2 import Path
from sklearn.manifold import TSNE
import seaborn as sns
import numpy as np
import sys
import pandas as pd
from IPython import get_ipython
ipython = get_ipython()
back2gui = { b:g for g, b in pt.backends.items() }
class plt_gui:
def __init__(self, gui):
self.gui = gui
def __enter__(self):
backend = matplotlib.get_backend()
self.old_gui = back2gui[backend]
ipython.magic('matplotlib ' + self.gui)
def __exit__(self, *args):
ipython.magic('matplotlib ' + self.old_gui)
class plt_inline(plt_gui):
def __init__(self):
super().__init__('inline')
class plt_notebook(plt_gui):
def __init__(self):
super().__init__('notebook')
def getsizeof(o, ids=None):
    """Recursively estimates the memory footprint of an object and its contents."""
    from collections.abc import Mapping, Container
    if ids is None:
        ids = set()
    if id(o) in ids:
        return 0
    r = sys.getsizeof(o)
    ids.add(id(o))
    if isinstance(o, (str, bytes)):
        return r
    if isinstance(o, Mapping):
        return r + sum(getsizeof(k, ids) + getsizeof(v, ids) for k, v in o.items())
    if isinstance(o, Container):
        return r + sum(getsizeof(x, ids) for x in o)
    return r
class Plot:
def __init__(self, xlabel=None, ylabel='Loss', xscale=None, yscale='log', **kwargs):
self.figure = plt.figure()
self.ax = self.figure.add_subplot(111)
self.figure.show()
self.xlabel = xlabel
self.ylabel = ylabel
self.xscale = xscale
self.yscale = yscale
def __enter__(self):
plt.ion()
return self
def __exit__(self, *args):
plt.ioff()
def set_ylim(self, y):
y = np.array(y)
while True:
mean_y = np.mean(y)
sd_y = np.std(y)
keep = (y >= mean_y - 4 * sd_y) & (y <= mean_y + 4 * sd_y)
if sum(keep) == len(y):
break
y = y[keep]
if min(y) < max(y):
self.ax.set_ylim(max(y) - (max(y) - min(y)) * 1.1, min(y) + (max(y) - min(y)))
def set_ylim_multi(self, yy):
min_y = None
max_y = None
for y in yy.values():
y = np.array(y)
while True:
mean_y = np.mean(y)
sd_y = np.std(y)
keep = (y >= mean_y - 3 * sd_y) & (y <= mean_y + 3 * sd_y)
if sum(keep) == len(y):
break
y = y[keep]
if min_y is not None:
min_y = min(min_y, min(y))
max_y = max(max_y, max(y))
else:
min_y = min(y)
max_y = max(y)
if min_y < max_y:
self.ax.set_ylim(max_y - (max_y - min_y) * 1.05, min_y + (max_y - min_y)*1.05)
def replot(self, x, y):
self.ax.clear()
if self.xlabel:
self.ax.set_xlabel(self.xlabel)
if self.ylabel:
self.ax.set_ylabel(self.ylabel)
if self.xscale:
self.ax.set_xscale(self.xscale)
if self.yscale:
self.ax.set_yscale(self.yscale)
self.set_ylim(y)
self.ax.plot( x, y)
plt.show()
self.figure.canvas.draw()
def multiplot(self, x, yy):
self.ax.clear()
if self.xlabel:
self.ax.set_xlabel(self.xlabel)
if self.ylabel:
self.ax.set_ylabel(self.ylabel)
if self.xscale:
self.ax.set_xscale(self.xscale)
if self.yscale:
self.ax.set_yscale(self.yscale)
self.set_ylim_multi(yy)
for name, y in yy.items():
self.ax.plot( x, y, label=str(name))
self.ax.legend()
self.figure.canvas.draw()
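# Illustrative sketch (not part of the original module): Plot is a small live-updating
# figure; replot redraws a single curve and multiplot draws one labelled curve per key.
# The helper name and the decaying curve below are hypothetical.
def _plot_example():
    """Draws a decaying loss-like curve with Plot.replot inside its interactive context."""
    x = list(range(1, 51))
    y = [1.0 / i for i in x]
    with Plot(xlabel='epoch', ylabel='loss') as p:
        p.replot(x, y)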
def to_numpy(arr):
if type(arr) is torch.Tensor:
if arr.device.type == 'cuda':
return arr.data.cpu().numpy()
else:
return arr.data.numpy()
return arr
def plot_histories(metric, history, train=True, valid=True, **kwargs):
plt.figure(**kwargs)
for label, t in history.items():
h = t.history
x = [ epoch['epoch'] for epoch in h.epochs['train'] ]
if train:
plt.plot(x, h.train(metric), label=f'train_{label}')
if valid:
plt.plot(x, h.valid(metric), label=f'valid_{label}')
plt.ylabel(metric.__name__)
plt.xlabel("epochs")
plt.legend()
plt.show()
def create_path(p, mode=0o777):
path = Path(p)
os.makedirs(path, mode, exist_ok=True)
return path
def scatter(x, colors):
num_classes = len(np.unique(colors))
palette = np.array(sns.color_palette("hls", num_classes))
f = plt.figure(figsize=(8, 8))
ax = plt.subplot(aspect='equal')
    sc = ax.scatter(x[:,0], x[:,1], lw=0, s=40, c=palette[colors.astype(int)])
plt.xlim(-25, 25)
plt.ylim(-25, 25)
ax.axis('off')
ax.axis('tight')
txts = []
for i in range(num_classes):
xtext, ytext = np.median(x[colors == i, :], axis=0)
txt = ax.text(xtext, ytext, str(i), fontsize=24)
txt.set_path_effects([
PathEffects.Stroke(linewidth=5, foreground="w"),
PathEffects.Normal()])
txts.append(txt)
#return f, ax, sc, txts
def to_numpy1(a):
try:
a = a.detach()
except: pass
try:
a = a.numpy()
except: pass
return a
def draw_regression(x, y_true, y_pred):
f = plt.figure(figsize=(8, 8))
x, y_true, y_pred = [to_numpy(a) for a in (x, y_true, y_pred)]
plt.scatter(x, y_true)
indices = np.argsort(x)
plt.plot(x[indices], y_pred[indices])
def line_predict(x, y_true, y_pred):
draw_regression(x, y_true, y_pred)
def scatter_xy(x, y):
f = plt.figure(figsize=(8, 8))
x, y = [to_numpy(a) for a in (x, y)]
plt.scatter(x, y)
def range3(start, end):
while start < end:
yield start
yield start * 3
start *= 10
def plot_tsne(X, y, random_state=0):
t = TSNE(random_state=random_state).fit_transform(X)
scatter(t, y)
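# Illustrative sketch (not part of the original module): plot_tsne projects a feature
# matrix to 2D with t-SNE and scatters the points coloured by class. The random data
# below and the helper name are hypothetical.
def _tsne_example():
    """Projects 200 random 10-dimensional points with 3 class labels to 2D and plots them."""
    X = np.random.randn(200, 10)
    y = np.random.randint(0, 3, size=200)
    plot_tsne(X, y)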
def trace_warnings():
import traceback
import warnings
import sys
def warn_with_traceback(message, category, filename, lineno, file=None, line=None):
log = file if hasattr(file,'write') else sys.stderr
traceback.print_stack(file=log)
log.write(warnings.formatwarning(message, category, filename, lineno, line))
warnings.showwarning = warn_with_traceback
def expand_features(df, target, *features):
if len(features) == 0:
return [c for c in df.columns if c != target]
else:
return [c for c in features if c != target]
def read_csv(filename, nrows=100, drop=None, columns=None, dtype=dict(), intcols=[], **kwargs):
df = pd.read_csv(filename, nrows=nrows, engine='python', **kwargs)
if drop:
df = df.drop(columns=drop)
elif columns:
df = df[columns]
float_cols = [c for c in df if df[c].dtype.kind == "f" or df[c].dtype.kind == "i"]
float32_cols = {c:np.float32 for c in float_cols}
float32_cols.update({ c:np.int64 for c in intcols })
float32_cols.update(dtype)
df = pd.read_csv(filename, dtype=float32_cols, engine='python', low_memory=False, **kwargs)
if drop:
df = df.drop(columns=drop)
elif columns:
df = df[columns]
return df
class nonondict(dict):
"""
A dict that does not store None values, which is used to keep a
dict of parameters for function calls, in which setting to None
does not override the default setting.
"""
def __init__(self, *args, **kwargs):
super().__init__()
self.update(*args, **kwargs)
def __setitem__(self, key, value):
if value is None:
try:
del self[key]
except: pass
else:
super().__setitem__(key, value)
def setifnone(self, key, value):
"""
Set a key to a value, only if that key does not yet exists.
Since None values are not added, this also applies to keys
that are previously set to None.
Arguments:
key: str
value: any
"""
if key not in self:
self[key] = value
def update(self, *args, **kwargs):
for k, v in dict(*args, **kwargs).items():
self[k] = v
```
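`nonondict` is the piece of this file that the trainer below leans on most, so a tiny standalone illustration of its behaviour follows. The import path is an assumption; adjust it to wherever the module actually lives.

```python
from pipetorch.train.helper import nonondict   # assumed import path

params = nonondict(lr=1e-3, weight_decay=None)  # None values are silently dropped
print(params)                                   # {'lr': 0.001}

params['weight_decay'] = 0.01                   # normal assignment works as usual
params['weight_decay'] = None                   # assigning None removes the key again
print('weight_decay' in params)                 # False

params.setifnone('lr', 1e-2)                    # keeps the existing value
params.setifnone('betas', (0.9, 0.999))         # sets it, because the key was absent
print(params)                                   # {'lr': 0.001, 'betas': (0.9, 0.999)}
```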
#### File: pipetorch/train/trainer.py
```python
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import AdamW
import timeit
import sys
import copy
import inspect
import numpy as np
import math
from tqdm.notebook import tqdm
from ..evaluate.evaluate import Evaluator
from torch.optim.lr_scheduler import OneCycleLR, ConstantLR
from .tuner import *
from .helper import nonondict
from functools import partial
import os
try:
GPU = int(os.environ['GPU'])
GPU = 0
except:
GPU = -1
# def last_container(last):
# try:
# l = last_container(last.children())
# if l is not None:
# return l
# except: pass
# try:
# if len(last._modules) > 0 and next(reversed(last._modules.values())).out_features > 0:
# return last
# except: pass
def to_numpy(arr):
try:
return arr.data.cpu().numpy()
except: pass
try:
return arr.to_numpy()
except: pass
return arr
# class DLModel(nn.Module):
# def __init__(self):
# super().__init__()
# def set_last_linear(self, out_features):
# container = self.last_container()
# name, last = container._modules.popitem()
# container.add_module(name, nn.Linear(last.in_features, out_features))
# def last_container(self):
# return last_container(self)
def UniformLR(*args, **kwargs):
class Uniform_Scheduler:
def step(self):
pass
return Uniform_Scheduler()
def onecycle(optimizer, lr, steps):
return OneCycleLR(optimizer, lr[1], total_steps=steps)
class ordered_dl:
def __init__(self, dl):
self.dl = dl
def __enter__(self):
self.oldsampler = self.dl.batch_sampler.sampler
self.newsampler = torch.utils.data.sampler.SequentialSampler(self.oldsampler.data_source)
self.dl.batch_sampler.sampler = self.newsampler
return self.dl
def __exit__(self, exc_type, exc_value, tb):
self.dl.batch_sampler.sampler = self.oldsampler
if exc_type is not None:
return False
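# Illustrative sketch (not part of the original module): ordered_dl temporarily swaps a
# DataLoader's sampler for a SequentialSampler so that predictions line up with the
# dataset order; the original sampler is restored on exit. Model, dataloader and the
# helper name are hypothetical.
def _ordered_dl_example(model, dataloader):
    """Collects predictions in dataset order, restoring the original sampler afterwards."""
    outputs = []
    with ordered_dl(dataloader) as dl:
        with torch.no_grad():
            for X, y in dl:
                outputs.append(to_numpy(model(X)))
    return np.concatenate(outputs)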
class trainer:
"""
A general purpose trainer for PyTorch.
Arguments:
model: nn.Module
a PyTorch Module that will be trained
loss: callable
a PyTorch or custom loss function
data: databunch or a list of iterables (DataLoaders)
a databunch is an object that has a train_dl, valid_dl,
and optionally test_dl property.
otherwise, a list of iterables can also be given.
Most often, these iterables are PyTorch DataLoaders that
are used to iterate over the respective datasets
for training and validation.
metrics: callable or list of callable
One or more functions that can be called with (y, y_pred)
to compute an evaluation metric. This will automatically be
done during training, for both the train and valid sets.
Typically, the callable is a function from SKLearn.metrics
like mean_squared_error or recall_score.
optimizer: PyTorch Optimizer (AdamW)
The PyTorch or custom optimizer class that is used during training
optimizerparams: dict (None)
the parameters that are passed (along with the model parameters)
to initialize an optimizer. A 'nonondict' is used, meaning that
when a None value is set, the key is removed, so that the default
value is used instead.
random_state: int
used to set a random state for reproducible results
scheduler: None, OneCycleLR, ConstantLR
used to adapt the learning rate:
- None will use a constant learning rate
        - OneCycleLR will use a cyclic annealing learning rate
between an upper and lower bound.
- ConstantLR will use a linear decaying learning rate between
an upper bound and lower bound. You can optionally use
'cycle' when calling 'train' to restart ConstantLR
every 'cycle' epochs.
weight_decay: float
Apply weight_decay regularization with the AdamW optimizer
momentum: float
Apply momentum with the AdamW optimizer
gpu: bool, int or torch.device
The device to train on:
False or -1: cpu
True: cuda:0, this is probably what you want to train on gpu
int: cuda:gpu
Setting the device will automatically move the model and data to
the given device. Note that the model is not automatically
transfered back to cpu afterwards.
evaluator: PipeTorch evaluator
An evaluator that was created by a different trainer or
DataFrame, to combine the results of different training
sessions.
"""
def __init__(self, model, loss, *data, metrics = [], optimizer=AdamW, optimizerparams=None, random_state=None, scheduler=None, weight_decay=None, momentum=None, gpu=False, evaluator=None, **kwargs):
self.report_frequency = 1
self.loss = loss
self.random_state = random_state
self.gpu(gpu)
self.set_data(*data)
self._model = model
try:
self.post_forward = model.post_forward
except: pass
self.optimizer = optimizer
self.optimizer_params = optimizerparams
self.scheduler = scheduler
if self.random_state is not None:
torch.backends.cudnn.deterministic=True
torch.manual_seed(self.random_state)
self._commit = {}
self.epochid = 0
self.weight_decay = weight_decay
self.momentum = momentum
self.lowest_score=None
self.highest_score=None
if evaluator is not None:
assert len(metrics) == 0, 'When you assign an evaluator, you cannot assign different metrics to a trainer'
self._evaluator = evaluator
self.metrics = evaluator.metrics
else:
self.metrics = metrics
def set_data(self, *data):
"""
Changes the dataset that is used by the trainer
Arguments:
data: databunch or a list of iterables (DataLoaders)
a databunch is an object that has a train_dl, valid_dl,
and optionally test_dl property.
otherwise, a list of iterables can also be given.
Most often, these iterables are PyTorch DataLoaders that
are used to iterate over the respective datasets
for training and validation.
"""
assert len(data) > 0, 'You have to specify a data source. Either a databunch or a set of dataloaders'
if len(data) == 1:
db = data[0]
self.databunch = db
elif len(data) < 4:
try:
_ = iter(data[0])
self.train_dl = data[0]
except TypeError:
raise TypeError('The first data source must be iterable, preferably a DataLoader that provide an X and y')
try:
_ = iter(data[1])
self.valid_dl = data[1]
except TypeError:
raise TypeError('The second data source must be iterable, preferably a DataLoader that provide an X and y')
if len(data) > 2:
try:
_ = iter(data[2])
self.test_dl = data[2]
except TypeError:
raise TypeError('The third data source must be iterable, preferably a DataLoader that provide an X and y')
@property
def evaluator(self):
"""
The (PipeTorch) evaluator that is used to log training progress
"""
try:
return self._evaluator
except:
try:
                self._evaluator = self.databunch.to_evaluator(*self.metrics)
except:
self._evaluator = Evaluator(self, *self.metrics)
return self._evaluator
def __repr__(self):
        return 'Trainer(' + str(self.model) + ')'
def to(self, device):
"""
Configures the device to train on
Arguments:
device: bool, int or torch.device
The device to train on:
False or -1: cpu
True: cuda:0, this is probably what you want to train on gpu
int: cuda:gpu
Setting the device will automatically move the model and data to
the given device. Note that the model is not automatically
transfered back to cpu afterwards.
"""
if device is True or (type(device) == int and device == 0):
device = torch.device('cuda:0')
elif device is False or (type(device) == int and device == -1):
device = torch.device('cpu')
elif type(device) == int:
            assert device < torch.cuda.device_count(), f'Cannot use gpu {device}, note that if a gpu has already been selected it is always renumbered to 0'
device = torch.device(f'cuda:{device}')
try:
if device != self.device:
self.device = device
try:
del self._optimizer
except: pass
except:
self.device = device
def cpu(self):
"""
Configure the trainer to train on cpu
"""
self.to(False)
def gpu(self, gpu=True):
"""
Configure the trainer to train on gpu, see to(device)
"""
self.to(gpu)
@property
def metrics(self):
"""
Returns: list of metrics that is collected while training
"""
return self._metrics
@metrics.setter
def metrics(self, value):
"""
Sets the metric(s) that are collected while training
"""
try:
iter(value)
self._metrics = value
except:
self._metrics = [] if value is None else [value]
@property
def databunch(self):
"""
Returns: the databunch that is used
thows an exception if no databunch has been configured
"""
return self._databunch
@databunch.setter
def databunch(self, db):
"""
Setter to use a databunch. The databunch object must have at least
a train_dl and a valid_dl property, and optional a test_dl. These
are often PyTorch DataLoaders, but can be any iterable over a
DataSet.
"""
assert hasattr(db, 'train_dl'), 'A single data source must be an object with a train_dl property (like a databunch)'
assert hasattr(db, 'valid_dl'), 'A single data source must be an object with a valid_dl property (like a databunch)'
self._databunch = db
self.train_dl = self.databunch.train_dl
self.valid_dl = self.databunch.valid_dl
try:
self.test_dl = self.databunch.test_dl
except: pass
@property
def lr(self):
"""
return: the learning rate that was set, could be an interval
"""
return self._lr
@lr.setter
def lr(self, lr):
"""
Sets the learning rate that is used for training. You can either use a single value
for a fixed lr, a tuple with an interval of two values for a linear decaying
scheduler, or a tuple with an interval of two values for a OneCyleLR scheduler.
The allocation of a scheduler can be overruled by setting a scheduler manually.
If the lr did not change, nothing happens, otherwise a new optimizer is created
when needed.
"""
if type(lr) is tuple:
lr = tuple(sorted(lr))
elif type(lr) is list:
lr = sorted(lr)
try:
if self.lr == lr:
return
except: pass
try:
del self._optimizer
except: pass
self._lr = lr
def set_lr(self, lr):
"""
sets the learning rate without changing the learning rate settings
the scheduler or optimizer. is used by tuners like find_lr.
"""
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
@property
def min_lr(self):
"""
the learning rate or lowest of an interval of learning rates
"""
try:
return self.lr[0]
except:
try:
return self.lr
except:
return 1e-2
@property
def max_lr(self):
"""
the learning rate or highest of an interval of learning rates
"""
try:
return self.lr[1]
except: pass
try:
return self.lr[0]
except: pass
return self.lr
def set_optimizer_param(self, key, value):
"""
Set a parameter for the optimizer. A 'nonondict' is used,
meaning that setting a value to None will cause the default
to be used.
Argument:
key: str
the key to use
value: any
the value to use. When set to None, the key is removed.
"""
self.optimizer_params[key] = value
try:
del self._optimizer
del self._scheduler
except: pass
@property
def weight_decay(self):
"""
Returns: the current value for the weight decay regularization
only works when using an Adam(W) optimizer
"""
return self.optimizer.param_groups[0]['weight_decay']
@weight_decay.setter
def weight_decay(self, value):
"""
Sets the weight decay regularization on the Adam(W) optimizer
"""
self.set_optimizer_param('weight_decay', value)
@property
def momentum(self):
"""
Returns the momentum value on the Adam(W) optimizer
"""
return self.optimizer.param_groups[0]['betas']
@momentum.setter
def momentum(self, value):
"""
Sets the momentum value on the Adam(W) optimizer
"""
self.set_optimizer_param('betas', value)
@property
def optimizer(self):
"""
Returns: an optimizer for training the model, using the applied
configuration (e.g. weight_decay, momentum, learning_rate).
If no optimizer exists, a new one is created using the configured
optimizerclass (default: AdamW) and settings.
"""
try:
return self._optimizer
except:
self.set_optimizer_param('lr', self.min_lr)
self._optimizer = self._optimizer_class(self.model.parameters(), **self.optimizer_params)
return self._optimizer
@optimizer.setter
def optimizer(self, value):
"""
Sets the optimizer class to use.
"""
self._optimizer_class = value
try:
del self._optimizer
del self._scheduler
except: pass
@property
def optimizer_params(self):
try:
return self._optimizer_params
except:
self._optimizer_params = nonondict()
return self._optimizer_params
@optimizer_params.setter
def optimizer_params(self, value):
"""
Setter for the optimizer parameters used, only applies them if
the value is set other than None. If you want to remove all
params, set them to an empty dict.
Arguments:
value: dict
conform the optimizer class that is used
"""
if value is not None:
            assert isinstance(value, dict), 'optimizer_params must be set to a dict'
self._optimizer_params = nonondict(value)
@property
def scheduler_params(self):
try:
return self._scheduler_params
except:
self._scheduler_params = nonondict()
return self._scheduler_params
@scheduler_params.setter
def scheduler_params(self, value):
"""
Setter for the scheduler parameters used, only applies them if
the value is set other than None. If you want to remove all
params, set them to an empty dict.
Arguments:
value: dict
conform the scheduler class/initializer that is used
"""
if value is not None:
            assert isinstance(value, dict), 'scheduler_params must be set to a dict'
            self._scheduler_params = nonondict(value)
def del_optimizer(self):
try:
del self._optimizer
except: pass
self.del_scheduler()
def del_scheduler(self):
try:
del self._scheduler
except: pass
@property
def scheduler(self):
"""
Returns: scheduler that is used to adapt the learning rate
When you have set a (partial) function to initialze a scheduler, it should accepts
(optimizer, lr, scheduler_params) as its parameters. Otherwise, one of three standard
schedulers is used based on the value of the learning rate. If the learning rate is
- float: no scheduler is used
- [max, min]: a linear decaying scheduler is used.
- (max, min): a OneCyleLR scheduler is used.
"""
try:
return self._scheduler
except:
try:
#steps = int(round((len(self.train_dl) * self.cycle_epochs)))
if self._scheduler_class is None:
try:
self.lr[1]
if type(self.lr) == tuple:
schedulerclass = OneCycleLR
elif type(self.lr) == list:
schedulerclass = ConstantLR
else:
                            raise NotImplementedError('Provide either a single value learning rate for a uniform scheduler, a list [low, high] for linear decay, or a tuple (low, high) for a OneCycleLR scheduler')
except:
schedulerclass = UniformLR
else:
schedulerclass = self._scheduler_class
if schedulerclass == ConstantLR:
factor = (self.min_lr / self.max_lr) ** (1 / self._scheduler_epochs)
self._scheduler = ConstantLR(self.optimizer, factor,
self._scheduler_epochs, **self.scheduler_params)
elif schedulerclass == OneCycleLR:
scheduler_params = self.scheduler_params
scheduler_params['epochs'] = self._scheduler_epochs
scheduler_params['steps_per_epoch'] = len(self.train_dl)
self._scheduler = OneCycleLR(self.optimizer,
self.min_lr, **scheduler_params)
else:
self._scheduler = schedulerclass(self.optimizer,
self.lr, **self.scheduler_params)
except:
raise NotImplementedError(f'The provided function does not work with (optim, {self.lr}, {self._scheduler_epochs}, {len(self.train_dl)}) to instantiate a scheduler')
return self._scheduler
@scheduler.setter
def scheduler(self, value):
"""
Sets the schedulerclass (or function to initialize a scheduler) to use. At this moment,
there is no uniform way to initialize all PyTorch schedulers.
PipeTorch provides easy support for using a scheduler through the learning rate:
- float: no scheduler is used
- [max, min]: a linear annealing scheduler is used.
- (max, min): a OneCyleLR scheduler is used.
To use another scheduler, set this to a function that accepts
the following parameters: (optimizer instance, learning rate, **scheduler_params)
The scheduler_params can be supplied when calling train.
"""
try:
del self._scheduler
except: pass
self._scheduler_class = value
# @property
# def out_features(self):
# try:
# return self._out_features
# except: pass
# try:
# self._out_features = last_container(self.model).out_features
# return self._out_features
# except:
# print('cannot infer out_features from the model, please specify it in the constructor of the trainer')
# raise
# @property
# def in_features(self):
# first = next(iter(self._model.modules()))
# while type(first) is nn.Sequential:
# first = next(iter(first.modules()))
# return first.in_features
@property
def valid_ds(self):
return self.valid_dl.dataset
@property
def train_ds(self):
return self.train_dl.dataset
@property
def test_ds(self):
return self.test_dl.dataset
@property
def train_Xy(self):
for batch in self.train_dl:
yield [ t.to(self.model.device) for t in batch ]
@property
def valid_Xy(self):
for batch in self.valid_dl:
yield [ t.to(self.model.device) for t in batch ]
@property
def test_Xy(self):
for batch in self.test_dl:
yield [ t.to(self.model.device) for t in batch ]
@property
def valid_tensors(self):
return self.valid_dl.dataset.tensors
@property
def train_tensors(self):
return self.train_dl.dataset.tensors
@property
def test_tensors(self):
return self.test_dl.dataset.tensors
@property
def train_X(self):
return self.train_tensors[0]
@property
def train_y(self):
return self.train_tensors[-1]
@property
def valid_X(self):
return self.valid_tensors[0]
@property
def valid_y(self):
return self.valid_tensors[-1]
@property
def test_X(self):
return self.test_tensors[0]
@property
def test_y(self):
return self.test_tensors[-1]
@property
def model(self):
"""
When a device is configured to train the model on, the model
is automatically transferred to the device. A device property
is set on the model to transfer the data to the same device
as the model before using.
Returns: the model
"""
try:
            if self.device != self._model.device:
self._model.device = self.device
self._model.to(self.device)
try:
del self._optimizer
except: pass
except:
try:
self._model.device = self.device
self._model.to(self.device)
#print('change device')
try:
del self._optimizer
except: pass
except: pass
return self._model
def parameters(self):
"""
Prints the (trainable) model parameters
"""
for name, param in self.model.named_parameters():
if param.requires_grad:
print(name, param.data)
def forward(self, *X):
"""
Returns the results of the model's forward on the given input X.
Arguments:
*X: tensor or collection of tensors
the tensor or collection of tensors that is passed to
the forward of the model. The inputs are automatically
transferred to the same device as the model is on.
Returns: tensor
the outputs returned by the model's forward pass.
"""
X = [ x.to(self.model.device) for x in X ]
return self.model(*X)
def predict(self, *X):
"""
Returns model predictions for the given input.
The difference with forward is that the outputs of the model
are optionally processed by a post_forward (for classification).
Arguments:
*X: tensor or collection of tensors
the tensor or collection of tensors that is passed to
the forward of the model. The inputs are automatically
transferred to the same device as the model is on.
Returns: tensor
Predictions returned by the forward pass on the model,
optionally passed through post_forward for classification
tasks
"""
return self.post_forward(self.forward(*X))
def post_forward(self, y):
"""
For classification tasks, training may require a different
pred_y than the evaluation metrics do. Typically, the predictions
are logits or an estimated likelihood (e.g. 0.2), while the
evaluation function need a class label (e.g. 0 or 1). Using
PipeTorch, you need to add a post_forward(y) method to your model,
that will be called on the predictions before they are passed
to the evaluation functions.
Returns: tensor
If the model has a post_forward to convert pred_y to predictions,
this returns the result of calling post_forward; otherwise,
it will just return pred_y
"""
post_forward = getattr(self.model, "post_forward", None)
if callable(post_forward):
return self.model.post_forward(y)
return y
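# Illustrative sketch (an assumption, not part of this trainer): a classification
# model can expose a post_forward that maps logits to class labels, so the loss
# sees logits while the evaluation metrics see labels.
#
#     class MyClassifier(nn.Module):
#         def __init__(self, in_features, n_classes):
#             super().__init__()
#             self.out = nn.Linear(in_features, n_classes)
#         def forward(self, x):
#             return self.out(x)               # logits, used by the loss
#         def post_forward(self, y):
#             return torch.argmax(y, dim=-1)   # class labels, used by the metrics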
def list_commits(self):
"""
Returns: a list of the keys of committed (saved) models, during
or after training.
"""
return self._commit.keys()
def commit(self, label):
"""
Save the model and optimizer state, allowing to revert to a
previous state/version of the model.
Arguments:
label: str
The key to save the model under
"""
model_state = copy.deepcopy(self.model.state_dict())
optimizer_state = copy.deepcopy(self.optimizer.state_dict())
self._commit[label] = (model_state, optimizer_state)
def _model_filename(self, folder=None, filename=None, extension=None):
if folder is None:
folder = '.'
if filename is not None:
path = f'{folder}/{filename}'
else:
path = f'{folder}/{self.model.__class__.__name__}'
if '.pyt' not in path:
if extension is None:
return f'{path}.pyt{torch.__version__}'
else:
return f'{path}.{extension}'
return path
def save(self, folder=None, filename=None, extension=None):
"""
Saves a (trained) model to file. This will only save the model parameters. To load the model, you will
first have to initialize a model with the same configuration, and then use trainer.load(path) to load
the model from file.
Arguments:
folder: str (None)
folder to save the model, default is the current folder
filename: str (None)
the basename of the saved file, default is the classname
extension: str (None)
the extension of the saved file, default is pyt with the pytorch version name
"""
path = self._model_filename(folder, filename, extension)
torch.save(self.model.state_dict(), path)
print(f'Saved the model as {path}')
def load(self, folder=None, filename=None, extension=None):
"""
Load a saved (trained) model from file. For this to work, the model for this trainer has to be configured
in the exact same way as the model that was saved. This will only load the model parameters.
Arguments:
folder: str (None)
folder to save the model, default is the current folder
filename: str (None)
the basename of the saved file, default is the classname
extension: str (None)
the extension of the saved file, default is pyt with the pytorch version name
"""
self.model.load_state_dict(torch.load(self._model_filename(folder, filename, extension)))
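# Usage sketch (assumes a trainer instance `t` configured elsewhere):
#     t.save(folder='models', filename='my_run')
#     # later, after constructing an identically configured model/trainer:
#     t.load(folder='models', filename='my_run')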
def to_trt(self):
"""
Converts the (trained) model into a TRT model that can be used on a Jetson
Returns: TRTModule
The converted model
"""
from torch2trt import torch2trt
x = next(iter(self.train_Xy))[0]
print(x.shape)
return torch2trt(self.model, [x])
def save_trt(self, folder=None, filename=None, extension='trt'):
"""
Converts the (trained) model to TRT and saves it.
Arguments:
folder: str (None)
folder to save the model, default is the current folder
filename: str (None)
the basename of the saved file, default is the classname
extension: str ('trt')
the extension of the saved file
"""
path = self._model_filename(folder, filename, extension)
torch.save(self.to_trt().state_dict(), path)
print(f'Saved the TRT model as {path}')
def save_onnx(self, folder=None, filename=None, extension='onnx'):
"""
Converts the (trained) model to ONNX and saves it.
Arguments:
folder: str (None)
folder to save the model, default is the current folder
filename: str (None)
the basename of the saved file, default is the classname
extension: str ('onnx')
the extension of the saved file
"""
path = self._model_filename(folder, filename, extension)
x = next(iter(self.train_Xy))[0][:1]
torch.onnx.export(self.model, x, path, verbose=True)
print(f'Saved the ONNX model as {path}')
def revert(self, label):
"""
Revert the model and optimizer to a previously commited state,
and deletes the commit point to free memory. Prints a warning
when the label was not found.
Arguments:
label: str
The key under which the model was commited
"""
if label in self._commit:
model_state, optimizer_state = self._commit.pop(label)
self.model.load_state_dict(model_state)
self.del_optimizer()
self.optimizer.load_state_dict(optimizer_state)
else:
print(f'commit point {label} not found')
def checkout(self, label):
"""
Loads a previously commited state of the model and optimizer
but keeps the commit point. Prints a warning
when the label was not found.
Arguments:
label: str
The key under which the model was commited
"""
if label in self._commit:
model_state, optimizer_state = self._commit[label]
self.model.load_state_dict(model_state)
self.del_optimizer()
self.optimizer.load_state_dict(optimizer_state)
else:
print(f'commit point {label} not found')
def remove_checkpoint(self, label):
"""
Removes a previously committed state of the model.
Arguments:
label: str
The key under which the model was commited
"""
self._commit.pop(label)
def purge(self, label):
"""
Switches the model and optimizer to a previously commited state,
and keeps only that commit point and removes all other versions.
Arguments:
label: str
The key under which the model was commited
"""
if label in self._commit:
self.checkout(label)
self._commit = { l:s for l, s in self._commit.items() if l == label }
else:
print(f'commit point {label} not found')
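# Usage sketch of the checkpointing API (assumes a trainer instance `t`):
#     t.commit('baseline')      # snapshot model + optimizer state
#     t.train(10)               # continue training
#     t.checkout('baseline')    # restore the snapshot and keep the commit point
#     t.revert('baseline')      # or: restore the snapshot and delete the commit point
#     t.purge('baseline')       # or: restore it and drop all other commit points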
def _loss_xy(self, *X, y=None):
"""
Computes predictions for the given X.
Arguments:
*X: tensor
inputs that are used by the forward of the model
y: tensor
ground truth labels, the predictions are compared against
Returns: (float, tensor)
a tuple with the loss for the predictions on X,
and a tensor with the predicted values
"""
assert y is not None, '_loss_xy requires a target; it was called with y=None'
y_pred = self.forward(*X)
return self.loss(y_pred, y), self.post_forward(y_pred)
def loss_dl(self, dl):
"""
Iterates over the given dataloader, the loss is computed in
evaluation mode and accumulated over the dataset.
Arguments:
dl: DataLoader
the dataloader that is used to iterate over.
Returns: float
weighted average loss over the given dataloader/set.
"""
if not dl:
dl = self.valid_Xy
losses = []
leny = 0
for *X, y in dl:
y_pred = self.forward(*X)
l = self.loss(y_pred, y)
losses.append(l.item() * len(y))
leny += len(y)
return sum(losses) / leny
def validate_loss(self):
"""
Returns: weighted average loss over the validation set, or
the data that is provided.
"""
return self.loss_dl(self.valid_Xy)
@property
def eval_mode(self):
"""
A ContextManager to put the model in evaluation mode
"""
class CM(object):
def __init__(self, trainer):
self.trainer = trainer
def __enter__(self):
self.trainer.model.eval()
self.prev = torch.is_grad_enabled()
torch.set_grad_enabled(False)
return self.trainer.model
def __exit__(self, type, value, traceback):
torch.set_grad_enabled(self.prev)
self.trainer.model.train()
return CM(self)
@property
def train_mode(self):
"""
A ContextManager to put the model in training mode
"""
class CM(object):
def __init__(self, trainer):
self.trainer = trainer
def __enter__(self):
self.trainer.model.train()
self.prev = torch.is_grad_enabled()
torch.set_grad_enabled(True)
return self.trainer.model
def __exit__(self, type, value, traceback):
torch.set_grad_enabled(self.prev)
self.trainer.model.eval()
return CM(self)
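# Usage sketch (assumes a trainer instance `t`): these context managers switch the
# model between eval()/train() and toggle gradient tracking around a block of code.
#     with t.eval_mode:
#         preds = t.predict(t.valid_X)   # no gradients, dropout/batchnorm in eval mode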
def validate(self, pbar=None, log={}):
"""
Run the validation set (in evaluation mode) and store the loss and metrics into the evaluator.
Arguments:
pbar: tqdm progress bar (None)
if not None, progress is reported on the progress bar
log: dict
additional labels to log when storing the results in the evaluator.
Returns: float
weighted average loss over the validation set
"""
epochloss = 0
n = 0
epoch_y_pred = []
epoch_y = []
with self.eval_mode:
for *X, y in self.valid_Xy:
loss, y_pred = self._loss_xy(*X, y=y)
epochloss += loss.item() * len(y_pred)
n += len(y_pred)
epoch_y_pred.append(to_numpy(y_pred))
epoch_y.append(to_numpy(y))
if pbar is not None:
pbar.update(self.valid_dl.batch_size)
epochloss /= n
epoch_y = np.concatenate(epoch_y, axis=0)
epoch_y_pred = np.concatenate(epoch_y_pred, axis=0)
self.evaluator._store(epoch_y, epoch_y_pred, loss=epochloss, phase='valid', epoch=self.epochid, **log)
return epochloss
def train_batch(self, *X, y=None):
"""
Train the model on a single batch X, y. The model should already
be in training mode.
Arguments:
*X: tensor
inputs that are used by the forward of the model
y: tensor
ground truth labels, the predictions are compared against
Returns: (float, tensor)
a tuple with the loss for the predictions on X,
and a tensor with the predicted values
"""
self.optimizer.zero_grad()
loss, y_pred = self._loss_xy(*X, y=y)
loss.backward()
self.optimizer.step()
return loss, y_pred
def _time(self):
try:
t = self._start_time
except:
t = timeit.default_timer()
self._start_time = timeit.default_timer()
return timeit.default_timer() - t
def train(self, epochs, lr=None, report_frequency=None, save=None, optimizer=None, optimizer_params=None, scheduler=False, scheduler_params=None, weight_decay=None, momentum=None, save_lowest=None, save_highest=None, log={}):
"""
Train the model for the given number of epochs. Loss and metrics
are logged during training in an evaluator. If a model was already
(partially) trained, training will continue where it was left off.
Arguments:
epochs: int
the number of epochs to train the model
lr: float, tuple of floats, or list of floats
float: set the learning rate
(upper, lower): switch the scheduler to OneCycleLR and
use a cyclic annealing learning rate
between an upper and lower bound.
[upper, lower]: switch the scheduler to Linear Decay and
use a linearly decaying learning rate
between an upper and lower bound.
report_frequency: int
configures after how many epochs the loss and metrics are
logged and reported during training. This is remembered
for consecutive calls to train.
save: str (None)
If not None, saves (commits) the model after each reported
epoch, under the name 'save'-epochnr
optimizer: PyTorch Optimizer (None)
If not None, changes the optimizer class to use.
optimizer_params: dict (None)
If not None, the parameters to configure the optimizer.
scheduler: None, custom scheduler class
used to adapt the learning rate. Set OneCycleLR or Linear Decay
through the learning rate. Otherwise, provide a custom
class/function to initialize a scheduler by accepting
(optimizer, learning_rate, scheduler_cycle)
scheduler_params: dict (None)
additional parameters that are passed when initializing the scheduler
weight_decay: float
Apply weight_decay regularization with the AdamW optimizer
momentum: float
Apply momentum with the AdamW optimizer
save_lowest: bool (False)
Automatically commit/save the model when reporting an epoch and the validation loss is lower
than seen before. The model is saved as 'lowest' and can be checked out by calling lowest()
on the trainer.
"""
self._scheduler_start = self.epochid # used by OneCycleScheduler
self._scheduler_epochs = epochs
self.scheduler_params = scheduler_params
self.del_optimizer()
self.lr = lr or self.lr
if weight_decay is not None and self.weight_decay != weight_decay:
self.weight_decay = weight_decay
if momentum is not None and self.momentum != momentum:
self.momentum = momentum
if optimizer and self._optimizerclass != optimizer:
self.optimizer = optimizer
if scheduler is not False:
self.scheduler = scheduler
self.report_frequency = report_frequency or self.report_frequency
model = self.model
torch.set_grad_enabled(False)
reports = math.ceil(epochs / self.report_frequency)
maxepoch = self.epochid + epochs
epochspaces = int(math.log(maxepoch)/math.log(10)) + 1
batches = len(self.train_dl) * self.train_dl.batch_size * epochs + len(self.valid_dl) * self.valid_dl.batch_size * reports
pbar = tqdm(range(batches), desc='Total', leave=False)
self._time()
for i in range(epochs):
self.epochid += 1
epochloss = 0
n = 0
epoch_y_pred = []
epoch_y = []
self.scheduler  # touch the property so a scheduler is instantiated for this training run
report = (((i + 1) % self.report_frequency) == 0 or i == epochs - 1)
with self.train_mode:
for *X, y in self.train_Xy:
loss, y_pred = self.train_batch(*X, y=y)
self.scheduler.step()
try:
# TODO: adjust the name
y_pred = model.post_forward(y_pred)
except: pass
if report:
epochloss += loss.item() * len(y_pred)
n += len(y_pred)
epoch_y_pred.append(to_numpy(y_pred))
epoch_y.append(to_numpy(y))
pbar.update(self.train_dl.batch_size)
if report:
epochloss /= n
epoch_y = np.concatenate(epoch_y, axis=0)
epoch_y_pred = np.concatenate(epoch_y_pred, axis=0)
self.evaluator._store(epoch_y, epoch_y_pred, loss=epochloss, phase='train', epoch=self.epochid, **log)
validloss = self.validate(pbar = pbar, log=log)
metric = ''
v = self.evaluator.valid.iloc[-1]
for m in self.metrics:
m = m.__name__
value = v[m]
try:
metric += f'{m}={value:.5f} '
except: pass
print(f'{self.epochid:>{epochspaces}} {self._time():.2f}s trainloss={epochloss:.5f} validloss={validloss:.5f} {metric}')
if save is not None:
self.commit(f'{save}-{self.epochid}')
if save_lowest is not None:
if self.lowest_score is None or validloss < self.lowest_score:
self.lowest_score = validloss
self.commit('lowest')
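# Usage sketch (assumes a trainer instance `t`):
#     t.train(50, lr=(1e-2, 1e-4), report_frequency=5, save_lowest=True)  # OneCycleLR between the bounds
#     t.lowest()   # switch back to the commit with the lowest validation loss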
def lowest(self):
"""
Checkout the model with the lowest validation loss, that was committed when training with save_lowest=True
"""
self.checkout('lowest')
def learning_curve(self, y='loss', series='phase', select=None, xlabel = None, ylabel = None, title=None, label_prefix='', **kwargs):
"""
Plot a learning curve with the train and valid loss on the y-axis over the epoch on the x-axis.
The plot is generated by the evaluator that logged training progress. By default the evaluator logs:
- epoch: the epoch number
- phase: 'train' or 'valid'
- loss: the weighted average loss
under the name of each metric function, the resulting value when called with (y, y_pred)
and the additional values that are passed to train() through the log parameter.
Arguments:
y: str or function
the metric that is used for the y-axis. It has to be a metric that was collected during training.
if a function is passed, the name of the function is used.
series: str ('phase')
the label to use as a series. By default, 'phase' is used to plot both the train and valid results.
select: see evaluator.select
using the values 'train' and 'valid' you can select to plot only the train or valid sets.
xlabel: str
the label used on the x-axis
ylabel: str
the label used on the y-axis
title: str
the title of the plot
label_prefix: str
prefixes the label, so that you can combine a plot with results from different metrics or models
**kwargs: dict
forwarded to matplotlib's plot or scatter function
"""
return self.evaluator.line_metric(x='epoch', series=series, select=select, y=y, xlabel = xlabel, ylabel = ylabel, title=title, label_prefix=label_prefix, **kwargs)
def validation_curve(self, y=None, x='epoch', series='phase', select=None, xlabel = None, ylabel = None, title=None, label_prefix='', **kwargs):
"""
Plot a metric for the train and valid set, over epoch on the x-axis. The plot is generated by the evaluator
that logged training progress. By default the evaluator logs:
- epoch: the epoch number
- phase: 'train' or 'valid'
- loss: the weighted average loss
under the name of each metric function, the resulting value when called with (y, y_pred)
and the additional values that are passed to train() through the log parameter.
Arguments:
y: str or function
the metric that is used for the y-axis. It has to be a metric that was collected during training.
if a function is passed, the name of the function is used.
x: str ('epoch')
the label used for the x-axis.
series: str ('phase')
the label to use as a series. By default, 'phase' is used to plot both the train and valid results.
select: see evaluator.select
using the values 'train' and 'valid' you can select to plot only the train or valid sets.
xlabel: str
the label used on the x-axis
ylabel: str
the label used on the y-axis
title: str
the title of the plot
label_prefix: str
prefixes the label, so that you can combine a plot with results from different metrics or models
**kwargs: dict
forwarded to matplotlib's plot or scatter function
"""
if y is not None and type(y) != str:
y = y.__name__
return self.evaluator.line_metric(x=x, series=series, select=select, y=y, xlabel = xlabel, ylabel = ylabel, title=title, label_prefix=label_prefix, **kwargs)
def freeze(self, last=-1):
"""
Mostly used for transfer learning, to freeze all parameters of a model, until the given layer (exclusive).
Arguments:
last: int (-1)
Freeze all layers up to this layer number. -1 is the last layer.
"""
for c in list(self.model.children())[:last]:
for p in c.parameters():
p.requires_grad=False
def unfreeze(self):
"""
Mostly used for transfer learning, to unfreeze all parameters of a model.
"""
for c in list(self.model.children()):
for p in c.parameters():
p.requires_grad=True
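# Transfer-learning sketch (assumes a trainer `t` wrapping a pretrained model):
#     t.freeze()          # freeze all children except the last
#     t.train(5, lr=1e-3) # train only the head
#     t.unfreeze()        # then fine-tune the whole network at a lower rate
#     t.train(5, lr=1e-5)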
def tune(self, params, setter, lr=[1e-6, 1e-2], steps=40, smooth=0.05, label=None, **kwargs):
lr_values = exprange(*lr, steps)
if label is None:
label = str(setter)
if len(params) == 2:
params = range3(*params)
with tuner(self, lr_values, self.set_lr, smooth=smooth, label=label, **kwargs) as t:
t.run_multi(params, setter)
def tune_weight_decay(self, lr=[1e-6,1e-4], params=[1e-6, 1], steps=40, smooth=0.05, yscale='log', **kwargs):
self.tune( params, partial(self.set_optimizer_param, 'weight_decay'), lr=lr, steps=steps, smooth=smooth, label='weight decay', yscale=yscale, **kwargs)
def lr_find(self, lr=[1e-6, 10], steps=40, smooth=0.05, cache_valid=True, **kwargs):
"""
Run a learning rate finder on the dataset (as proposed by <NAME> and implemented in FastAI).
This saves the model, then starting with a very low learning rate
iteratively trains the model on a single mini-batch and logs the loss on the validation set. Gradually, the
learning rate is raised. The idea is that the graph contains information on a stable setting of the learning
rate. This does not always work, and often after some training, if learning is not stable, the learning rate
still needs to be adjusted.
The result is a plot of the validation loss over the change in learning rate.
Arguments:
lr: [small float, big float] ([1e-6, 10])
Interval of learning rates to inspect
steps: int (40)
number of (exponential) steps to divide the learning rate interval in
smooth: float (0.05)
smoothing parameter, to generate a more readable graph
cache_valid: bool (True)
whether to keep the validation set if possible in memory. Switch off if there is insufficient memory
"""
with tuner(self, exprange(lr[0], lr[1], steps), self.set_lr, label='lr', yscale='log', smooth=smooth, cache_valid=cache_valid, **kwargs) as t:
t.run()
```
|
{
"source": "jeroenwinkelhorst/ToHyDAMOgml",
"score": 3
}
|
#### File: ToHyDAMOgml/tohydamogml/read_filegdb.py
```python
import fiona
import geopandas as gpd
import pandas as pd
from tohydamogml.config import COLNAME_OID
def read_filegdb(filegdb, layer):
"""Read filegdb with fiona to get original objectid. Return geopandas dataframe or pandas dataframe"""
if layer in fiona.listlayers(filegdb):
# Peek at the first feature to determine whether the layer has geometry,
# then rebuild the generator so the first feature is not dropped.
first_feature = next(_yield_features(filegdb, layer))
if first_feature["geometry"] is not None:
gdf = gpd.GeoDataFrame.from_features(_yield_features(filegdb, layer), crs=get_crs(filegdb, layer))
gdf[COLNAME_OID] = gdf[COLNAME_OID].astype(int)
return gdf
else:
df = pd.DataFrame.from_records(_yield_table(filegdb, layer))
df[COLNAME_OID] = df[COLNAME_OID].astype(int)
return df
else:
raise ValueError(f"layer '{layer}' not in layer list: {fiona.listlayers(filegdb)}")
def _yield_features(path, layer, colname_oid=COLNAME_OID):
"""Read filegdb with fiona to get original objectid"""
with fiona.open(path, 'r', layer=layer) as f:
for feature in f:
feature['properties'][colname_oid] = feature['id']
yield feature
def _yield_table(path, layer, colname_oid=COLNAME_OID):
"""Read filegdb table with fiona to get original objectid"""
with fiona.open(path, 'r', layer=layer) as f:
for feature in f:
feature['properties'][colname_oid] = feature['id']
yield feature['properties']
def get_crs(path, layer):
with fiona.open(path, 'r', layer=layer) as f:
if type(f.crs) == dict:
if 'init' in f.crs.keys():
return f.crs['init']
return None
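# Usage sketch (the path and layer name below are assumptions, not taken from this project):
#     gdf = read_filegdb('data/example.gdb', 'hydroobject')
#     print(gdf[COLNAME_OID].head())   # original FileGDB objectids, kept as integers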
```
|
{
"source": "jeroenzeegers/panopuppet",
"score": 2
}
|
#### File: pano/views/analytics.py
```python
import pytz
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect, render
from django.views.decorators.cache import cache_page
from pano.puppetdb.pdbutils import run_puppetdb_jobs, json_to_datetime
from pano.puppetdb.puppetdb import set_server, get_server
from pano.settings import AVAILABLE_SOURCES, CACHE_TIME
__author__ = 'etaklar'
@login_required
@cache_page(CACHE_TIME)
def analytics(request):
context = {'timezones': pytz.common_timezones,
'SOURCES': AVAILABLE_SOURCES}
if request.method == 'GET':
if 'source' in request.GET:
source = request.GET.get('source')
set_server(request, source)
if request.method == 'POST':
request.session['django_timezone'] = request.POST['timezone']
return redirect(request.POST['return_url'])
source_url, source_certs, source_verify = get_server(request)
events_class_params = {
'query':
{
1: '["and",["=","latest_report?",true],["in","certname",["extract","certname",["select_nodes",["null?","deactivated",true]]]]]'
},
'summarize_by': 'containing_class',
}
events_resource_params = {
'query':
{
1: '["and",["=","latest_report?",true],["in","certname",["extract","certname",["select_nodes",["null?","deactivated",true]]]]]'
},
'summarize_by': 'resource',
}
events_status_params = {
'query':
{
1: '["and",["=","latest_report?",true],["in","certname",["extract","certname",["select_nodes",["null?","deactivated",true]]]]]'
},
'summarize_by': 'resource',
}
reports_runavg_params = {
'limit': 100,
'order_by': {
'order_field': {
'field': 'receive_time',
'order': 'desc',
},
'query_field': {'field': 'certname'},
},
}
jobs = {
'events_class_list': {
'url': source_url,
'certs': source_certs,
'verify': source_verify,
'id': 'events_class_list',
'path': '/event-counts',
'api_version': 'v4',
'params': events_class_params,
'request': request
},
'events_resource_list': {
'url': source_url,
'certs': source_certs,
'verify': source_verify,
'id': 'events_resource_list',
'path': '/event-counts',
'api_version': 'v4',
'params': events_resource_params,
'request': request
},
'events_status_list': {
'url': source_url,
'certs': source_certs,
'verify': source_verify,
'id': 'events_status_list',
'path': '/aggregate-event-counts',
'api_version': 'v4',
'params': events_status_params,
'request': request
},
'reports_run_avg': {
'url': source_url,
'certs': source_certs,
'verify': source_verify,
'id': 'reports_run_avg',
'path': '/reports',
'api_version': 'v4',
'params': reports_runavg_params,
'request': request
},
}
job_results = run_puppetdb_jobs(jobs, 4)
reports_run_avg = job_results['reports_run_avg']
events_class_list = job_results['events_class_list']
events_resource_list = job_results['events_resource_list']
events_status_list = job_results['events_status_list']
num_runs_avg = len(reports_run_avg)
run_avg_times = []
avg_run_time = 0
for report in reports_run_avg:
run_time = "{0:.0f}".format(
(json_to_datetime(report['end_time']) - json_to_datetime(report['start_time'])).total_seconds())
avg_run_time += int(run_time)
run_avg_times.append(run_time)
if num_runs_avg != 0:
avg_run_time = "{0:.0f}".format(avg_run_time / num_runs_avg)
else:
avg_run_time = 0
class_event_results = []
class_resource_results = []
class_status_results = []
for item in events_class_list:
class_name = item['subject']['title']
class_total = item['skips'] + item['failures'] + item['noops'] + item['successes']
class_event_results.append((class_name, class_total))
for item in events_resource_list:
class_name = item['subject']['type']
class_total = item['skips'] + item['failures'] + item['noops'] + item['successes']
class_resource_results.append((class_name, class_total))
print(events_status_list)
if events_status_list:
for status, value in events_status_list[0].items():
print(status, value)
if value == 0 or status == 'total' or status == 'summarize_by':
continue
class_status_results.append((status, value))
context['class_events'] = class_event_results
context['class_status'] = class_status_results
context['resource_events'] = class_resource_results
context['run_times'] = run_avg_times
context['run_num'] = num_runs_avg
context['run_avg'] = avg_run_time
return render(request, 'pano/analytics/analytics.html', context)
```
#### File: views/api/report_agent_log.py
```python
import arrow
import json
from django.contrib.auth.decorators import login_required
from django.shortcuts import HttpResponse
from django.template import defaultfilters as filters
from django.utils.timezone import localtime
from django.views.decorators.cache import cache_page
from pano.puppetdb import puppetdb
from pano.puppetdb.puppetdb import get_server
from pano.settings import CACHE_TIME
__author__ = 'etaklar'
@login_required
@cache_page(CACHE_TIME)
def report_log_json(request, report_hash=None):
source_url, source_certs, source_verify = get_server(request)
# Redirects to the events page if GET param latest is true..
context = {}
if report_hash is None:
context['error'] = 'Report Hash not provided.'
return HttpResponse(json.dumps(context), content_type="application/json")
report_logs = puppetdb.api_get(
api_url=source_url,
cert=source_certs,
verify=source_verify,
path='/reports/' + report_hash + '/logs',
api_version='v4',
)
if 'error' in report_logs:
context = report_logs
return HttpResponse(json.dumps(context), content_type="application/json")
# Remove the dict from the list...
for log in report_logs:
# Parse... 2015-09-18T18:02:04.753163330+02:00
# Puppetlabs... has a super long millisecond counter (9 digits!!!)
# We need to trim those down...
time = log['time'][0:26] + log['time'][-6:-3] + log['time'][-2:]
time = arrow.get(time).to('UTC').datetime
log['time'] = filters.date(localtime(time), 'Y-m-d H:i:s')
context['agent_log'] = report_logs
context['report_hash'] = report_hash
return HttpResponse(json.dumps(context), content_type="application/json")
```
#### File: views/api/report_data.py
```python
import json
from django.contrib.auth.decorators import login_required
from django.shortcuts import HttpResponse
from django.template import defaultfilters as filters
from django.utils.timezone import localtime
from django.views.decorators.cache import cache_page
from pano.puppetdb import puppetdb
from pano.puppetdb.pdbutils import json_to_datetime
from pano.puppetdb.puppetdb import get_server
from pano.settings import CACHE_TIME
__author__ = 'etaklar'
@login_required
@cache_page(CACHE_TIME)
def reports_json(request, certname=None):
source_url, source_certs, source_verify = get_server(request)
# Redirects to the events page if GET param latest is true..
context = {}
# Cur Page Number
if request.GET.get('page', False):
if request.session['report_page'] != int(request.GET.get('page', 1)):
request.session['report_page'] = int(request.GET.get('page', 1))
if request.session['report_page'] <= 0:
request.session['report_page'] = 1
else:
if 'report_page' not in request.session:
request.session['report_page'] = 1
if request.session['report_page'] <= 0:
offset = 0
else:
offset = (25 * request.session['report_page']) - 25
reports_params = {
'query':
{
1: '["=","certname","' + certname + '"]'
},
'order_by':
{
'order_field':
{
'field': 'start_time',
'order': 'desc',
},
},
'limit': 25,
'include_total': 'true',
'offset': offset,
}
reports_list, headers = puppetdb.api_get(
api_url=source_url,
cert=source_certs,
verify=source_verify,
path='/reports',
api_version='v4',
params=puppetdb.mk_puppetdb_query(
reports_params, request),
)
# Work out the number of pages from the xrecords response
xrecords = headers['X-Records']
num_pages_wdec = float(xrecords) / 25
num_pages_wodec = float("{:.0f}".format(num_pages_wdec))
if num_pages_wdec > num_pages_wodec:
num_pages = num_pages_wodec + 1
else:
num_pages = num_pages_wodec
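# e.g. X-Records = 60 gives 60/25 = 2.4, which is rounded up to 3 pages of at most 25 reports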
report_status = []
for report in reports_list:
found_report = False
events_params = {
'query':
{
1: '["=","report","' + report['hash'] + '"]'
},
'summarize_by': 'certname',
}
eventcount_list = puppetdb.api_get(
path='event-counts',
api_url=source_url,
api_version='v4',
params=puppetdb.mk_puppetdb_query(events_params, request),
)
# Make list of the results
for event in eventcount_list:
if event['subject']['title'] == report['certname']:
found_report = True
report_status.append({
'hash': report['hash'],
'certname': report['certname'],
'environment': report['environment'],
'start_time': filters.date(localtime(json_to_datetime(report['start_time'])), 'Y-m-d H:i:s'),
'end_time': filters.date(localtime(json_to_datetime(report['end_time'])), 'Y-m-d H:i:s'),
'events_successes': event['successes'],
'events_noops': event['noops'],
'events_failures': event['failures'],
'events_skipped': event['skips'],
'report_status': report['status'],
'config_version': report['configuration_version'],
'run_duration': "{0:.0f}".format(
(json_to_datetime(report['end_time']) - json_to_datetime(report['start_time'])).total_seconds())
})
break
if found_report is False:
report_status.append({
'hash': report['hash'],
'certname': report['certname'],
'environment': report['environment'],
'start_time': filters.date(localtime(json_to_datetime(report['start_time'])), 'Y-m-d H:i:s'),
'end_time': filters.date(localtime(json_to_datetime(report['end_time'])), 'Y-m-d H:i:s'),
'events_successes': 0,
'events_noops': 0,
'events_failures': 0,
'events_skipped': 0,
'report_status': report['status'],
'config_version': report['configuration_version'],
'run_duration': "{0:.0f}".format(
(json_to_datetime(report['end_time']) - json_to_datetime(report['start_time'])).total_seconds())
})
context['certname'] = certname
context['reports_list'] = report_status
context['curr_page'] = request.session['report_page']
context['tot_pages'] = "{:.0f}".format(num_pages)
return HttpResponse(json.dumps(context), content_type="application/json")
def reports_search_json(request):
context = dict()
if request.method == 'GET':
search = request.GET.get('search', None)
certname = request.GET.get('certname', None)
if not certname or not search:
context['error'] = 'Must specify both certname and search query.'
return HttpResponse(json.dumps(context), content_type="application/json")
source_url, source_certs, source_verify = get_server(request)
# Redirects to the events page if GET param latest is true..
reports_params = {
'query':
{
'operator': 'and',
1: '["=","certname","' + certname + '"]',
2: '["~","hash","^' + search + '"]'
},
'order_by':
{
'order_field':
{
'field': 'start_time',
'order': 'desc',
},
}
}
reports_list = puppetdb.api_get(
path='/reports',
api_url=source_url,
api_version='v4',
params=puppetdb.mk_puppetdb_query(reports_params, request),
)
return HttpResponse(json.dumps(reports_list), content_type="application/json")
```
#### File: pano/views/event_analytics.py
```python
import pytz
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect, render
from django.views.decorators.cache import cache_page
from pano.methods import events
from pano.puppetdb.puppetdb import set_server
from pano.settings import AVAILABLE_SOURCES, CACHE_TIME
__author__ = 'etaklar'
@login_required
@cache_page(CACHE_TIME)
def event_analytics(request, view='summary'):
context = {'timezones': pytz.common_timezones,
'SOURCES': AVAILABLE_SOURCES}
if request.method == 'GET':
if 'source' in request.GET:
source = request.GET.get('source')
set_server(request, source)
if request.method == 'POST':
request.session['django_timezone'] = request.POST['timezone']
return redirect(request.POST['return_url'])
summary = events.get_events_summary(timespan='latest', request=request)
context['summary'] = summary
# Show Classes
if request.GET.get('value', False):
if view == 'classes':
class_name = request.GET.get('value')
title = "Class: %s" % class_name
class_events = events.get_report(key='containing_class', value=class_name, request=request)
context['events'] = class_events
# Show Nodes
elif view == 'nodes':
node_name = request.GET.get('value')
title = "Node: %s" % node_name
node_events = events.get_report(key='certname', value=node_name, request=request)
context['events'] = node_events
# Show Resources
elif view == 'resources':
resource_name = request.GET.get('value')
title = "Resource: %s" % resource_name
resource_events = events.get_report(key='resource_title', value=resource_name, request=request)
context['events'] = resource_events
# Show Types
elif view == 'types':
type_name = request.GET.get('value')
title = "Type: %s" % type_name
type_events = events.get_report(key='resource_type', value=type_name, request=request)
context['events'] = type_events
# Show summary if none of the above matched
else:
sum_avail = ['classes', 'nodes', 'resources', 'types']
stat_avail = ['failed', 'noop', 'success', 'skipped']
show_summary = request.GET.get('show_summary', 'classes')
show_status = request.GET.get('show_status', 'failed')
if show_summary in sum_avail and show_status in stat_avail:
title = "%s with status %s" % (show_summary.capitalize(), show_status.capitalize())
context['show_title'] = title
else:
title = 'Failed Classes'
context['show_title'] = title
return render(request, 'pano/analytics/events_details.html', context)
# Add title to context
context['show_title'] = title
# if a specific view matched above (and the summary else-branch did not return), render the inspect template.
return render(request, 'pano/analytics/events_inspect.html', context)
```
#### File: pano/views/radiator.py
```python
import pytz
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect, render
from django.views.decorators.cache import cache_page
from pano.puppetdb import puppetdb
from pano.puppetdb.puppetdb import set_server, get_server
from pano.settings import AVAILABLE_SOURCES, CACHE_TIME, NODES_DEFAULT_FACTS
__author__ = 'etaklar'
@login_required
@cache_page(CACHE_TIME)
def radiator(request, certname=None):
context = {'timezones': pytz.common_timezones,
'SOURCES': AVAILABLE_SOURCES}
if request.method == 'GET':
if 'source' in request.GET:
source = request.GET.get('source')
set_server(request, source)
if request.method == 'POST':
request.session['django_timezone'] = request.POST['timezone']
return redirect(request.POST['return_url'])
context['certname'] = certname
context['node_facts'] = ','.join(NODES_DEFAULT_FACTS)
return render(request, 'pano/radiator.html', context)
```
#### File: pano/views/report_agent_logs.py
```python
import pytz
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect, render
from django.views.decorators.cache import cache_page
from pano.puppetdb.puppetdb import set_server
from pano.settings import AVAILABLE_SOURCES, CACHE_TIME
__author__ = 'etaklar'
@login_required
@cache_page(CACHE_TIME * 60) # Cache for cache_time multiplied 60 because the report will never change...
def agent_logs(request, certname=None, report_hash=None):
context = {'timezones': pytz.common_timezones,
'SOURCES': AVAILABLE_SOURCES}
if request.method == 'GET':
if 'source' in request.GET:
source = request.GET.get('source')
set_server(request, source)
if request.method == 'POST':
request.session['django_timezone'] = request.POST['timezone']
return redirect(request.POST['return_url'])
context['certname'] = certname
context['report_hash'] = report_hash
return render(request, 'pano/report_agent_logs.html', context)
```
#### File: panopuppet/tests/test_puppetdb_functions.py
```python
from django.test import TestCase
from pano.puppetdb.puppetdb import mk_puppetdb_query
__author__ = 'etaklar'
class CreatePuppetdbQueries(TestCase):
def test_single_search_query(self):
content = {
'query':
{
1: '["=","certname","hostname.example.com"]'
},
}
expected_results = {
'query': '["and",["=","certname","hostname.example.com"]]'
}
results = mk_puppetdb_query(content)
self.assertEqual(expected_results, results)
def test_double_search_query_with_operator(self):
content = {
'query':
{
'operator': 'and',
1: '["=","hash","e4fug294hf3293hf9348g3804hg3084h"]',
2: '["=","latest_report?",true]'
},
}
expected_results = {
'query': '["and",["=","hash","e4fug294hf3293hf9348g3804hg3084h"],["=","latest_report?",true]]'
}
results = mk_puppetdb_query(content)
self.assertEqual(expected_results, results)
def test_single_search_query_with_operator(self):
content = {
'query':
{
'operator': 'and',
1: '["=","hash","e4fug294hf3293hf9348g3804hg3084h"]',
},
}
expected_results = {
'query': '["and",["=","hash","e4fug294hf3293hf9348g3804hg3084h"]]'
}
results = mk_puppetdb_query(content)
self.assertEqual(expected_results, results)
def test_summarize_by_query(self):
content = {
'summarize_by': 'containing_class',
}
expected_results = {
'summarize_by': 'containing_class'
}
results = mk_puppetdb_query(content)
self.assertEqual(expected_results, results)
def test_summarize_by_query_with_single_search_query(self):
content = {
'query':
{
1: '["=","certname","hostname.example.com"]'
},
'summarize_by': 'containing_class',
}
expected_results = {
'query': '["and",["=","certname","hostname.example.com"]]',
'summarize_by': 'containing_class'
}
results = mk_puppetdb_query(content)
self.assertEqual(expected_results, results)
def test_order_by_query(self):
content = {
'order_by':
{
'order_field':
{
'field': 'report_timestamp',
'order': 'desc',
},
}
}
expected_results = {
'order_by': '[{"field":"report_timestamp","order":"desc"}]'
}
results = mk_puppetdb_query(content)
self.assertEqual(expected_results, results)
def test_order_by_query_with_single_search_query(self):
content = {
'query':
{
1: '["=","certname","hostname.example.com"]'
},
'order_by':
{
'order_field':
{
'field': 'report_timestamp',
'order': 'desc',
},
}
}
expected_results = {
'order_by': '[{"field":"report_timestamp","order":"desc"}]',
'query': '["and",["=","certname","hostname.example.com"]]'
}
results = mk_puppetdb_query(content)
self.assertEqual(expected_results, results)
def test_order_by_query_with_double_search_query_with_operator(self):
content = {
'query':
{
'operator': 'or',
1: '["=","certname","hostname1.example.com"]',
2: '["=","certname","hostname2.example.com"]'
},
'order_by':
{
'order_field':
{
'field': 'report_timestamp',
'order': 'desc',
},
}
}
expected_results = {
'query': '["and",["=","certname","hostname1.example.com"],["=","certname","hostname2.example.com"]]',
'order_by': '[{"field":"report_timestamp","order":"desc"}]'
}
results = mk_puppetdb_query(content)
self.assertEqual(expected_results, results)
def test_query_with_string(self):
content = "string value"
self.assertRaises(TypeError, mk_puppetdb_query, params=content)
def test_query_with_list(self):
content = ['test1', 'test2']
self.assertRaises(TypeError, mk_puppetdb_query, params=content)
def test_query_with_integer(self):
content = 1
self.assertRaises(TypeError, mk_puppetdb_query, params=content)
def test_query_with_empty_dict(self):
content = {}
expected_results = {}
results = mk_puppetdb_query(content)
self.assertEqual(expected_results, results)
```
#### File: panopuppet/tests/test_puppetdb_utils.py
```python
from datetime import datetime, timedelta
from django.test import TestCase
from pano.puppetdb.pdbutils import is_unreported
__author__ = 'etaklar'
class CheckIfUnreported(TestCase):
def test_none_date(self):
"""
Should fail because if there is no report timestamp
the node has not managed to complete a puppet run.
"""
"""
:return:
"""
date = None
results = is_unreported(date)
self.assertEquals(results, True)
def test_date_reported_within_two_hours(self):
"""
Should return False since the node has reported within
the default value of 2 hours.
"""
date = (datetime.utcnow() - timedelta(hours=1)).strftime('%Y-%m-%dT%H:%M:%S.%fZ')
results = is_unreported(date)
self.assertEquals(results, False)
def test_date_unreported_within_two_hours(self):
"""
Should return True since the node has not reported within
the default value of 2 hours.
"""
date = (datetime.utcnow() - timedelta(hours=3)).strftime('%Y-%m-%dT%H:%M:%S.%fZ')
results = is_unreported(date)
self.assertEquals(results, True)
def test_invalid_formatted_date(self):
"""
Since a date in the incorrect format can not be read
datetime should raise an error because it does not
match the format %Y-%m-%dT%H:%M:%S.%fZ
"""
date = 'not_a_real_date'
self.assertRaises(ValueError, is_unreported, node_report_timestamp=date)
def test_unreported_date_with_hours_set_to_24_hours(self):
"""
Test timestamp set to 25 hours ago, it should
count as a unreported timestamp since the unreported
time is set to 24 hours.
"""
date = (datetime.utcnow() - timedelta(hours=25)).strftime('%Y-%m-%dT%H:%M:%S.%fZ')
results = is_unreported(date, unreported=24)
self.assertEquals(results, True)
def test_reported_date_with_hours_set_to_30_minutes_using_float_value(self):
"""
Test unreported parameter to is_unreported accepts float value.
It is set to .5 hours which is effectively 30 minutes.
With a time set to 15 minutes ago it should return
that the node is not unreported.
"""
date = (datetime.utcnow() - timedelta(minutes=15)).strftime('%Y-%m-%dT%H:%M:%S.%fZ')
results = is_unreported(date, unreported=.5)
self.assertEquals(results, False)
```
|
{
"source": "jeroFlo/robotsVision_openCV",
"score": 3
}
|
#### File: samples/python/practice_2p1.py
```python
from __future__ import print_function
import numpy as np
import cv2 as cv
import math
bins = np.arange(256).reshape(256,1)
def hist_curve(im):
h = np.zeros((300,256,3))
if len(im.shape) == 2:
color = [(255,255,255)]
elif im.shape[2] == 3:
color = [ (255,0,0),(0,255,0),(0,0,255) ]
for ch, col in enumerate(color):
hist_item = cv.calcHist([np.uint8(im)],[ch],None,[256],[0,256])
cv.normalize(hist_item,hist_item,0,255,cv.NORM_MINMAX)
hist=np.int32(np.around(hist_item))
pts = np.int32(np.column_stack((bins,hist)))
cv.polylines(h,[pts],False,col)
y=np.flipud(h)
return y
def binary(img, threshold = 127):
im = img
im = cv.cvtColor(im,cv.COLOR_BGR2GRAY)
#blur = cv.GaussianBlur(im,(3,3),0)
#ret3,th3 = cv.threshold(blur,0,255,cv.THRESH_BINARY+cv.THRESH_OTSU)
ret1,th3 = cv.threshold(im,threshold,255,cv.THRESH_BINARY)
return th3
def contrast(im, alpha):
img = np.where(im*alpha < im, 255, alpha*im)
return img
def brightness(im, beta):
if beta > 0:
img = np.where(im+beta < im, 255, im+beta)
elif beta < 0:
img = np.where(im+beta < 0, 0, im+beta)
return np.uint8(img)
else:
return im
return img
def main():
import sys
if len(sys.argv)>1:
fname = sys.argv[1]
#../../../practice_1_2_fft/resources/gato_2.jpg
else :
fname = 'lena.jpg'
print("usage : python hist.py <image_file>")
im = cv.imread(cv.samples.findFile(fname))
if im is None:
print('Failed to load image file:', fname)
sys.exit(1)
gray = cv.cvtColor(im,cv.COLOR_BGR2GRAY)
print(''' Histogram plotting \n
show histogram for color image in curve mode \n
show binarization from the input image \n
Esc - exit \n
''')
cv.imshow('image',im)
curve = hist_curve(im) # get the histogram
cv.imshow('histogram original image',curve) # show the histogram
img = contrast(im, 2)
cv.imshow('contrast', img)
curve = hist_curve(img) # get the histogram
cv.imshow('histogram contrast',curve)
img = brightness(im, -100)
cv.imshow('brightness mine', img)
curve = hist_curve(img) # get the histogram
cv.imshow('histogram brightness mine',curve)
#for contrast and brightness given function
img = cv.convertScaleAbs(im, alpha=1, beta=100)
cv.imshow('brightness', img)
curve = hist_curve(img) # get the histogram
cv.imshow('histogram brightness',curve)
#Edge detection
# gaussian = cv2.GaussianBlur(gray_image, (n,n), 0)
img_gauss = cv.GaussianBlur(gray, (3,3), 0) # 3x3 kernel
img = binary(im, 150)
cv.imshow('image',img)
# Canny
# canny = cv2.Canny(image, hysteresis_min_threshold, hysteresis_max_threshold)
img_canny = cv.Canny(img, 100, 200)
cv.imshow("Canny", img_canny)
# Sobel
img_sobelx = cv.Sobel(img_gauss, cv.CV_8U, 1, 0, ksize=3)
img_sobely = cv.Sobel(img_gauss, cv.CV_8U, 0, 1, ksize=3)
img_sobel = img_sobelx + img_sobely
cv.imshow("Sobel X", img_sobelx)
cv.imshow("Sobel Y", img_sobely)
cv.imshow("Sobel", img_sobel)
# Prewitt
kernelx = np.array([[1,1,1],[0,0,0],[-1,-1,-1]])
kernely = np.array([[-1,0,1],[-1,0,1],[-1,0,1]])
img_prewittx = cv.filter2D(img_gauss, -1, kernelx)
img_prewitty = cv.filter2D(img_gauss, -1, kernely)
cv.imshow("Prewitt X", img_prewittx)
cv.imshow("Prewitt Y", img_prewitty)
cv.imshow("Prewitt", img_prewittx + img_prewitty)
cv.waitKey(0)
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
```
#### File: samples/python/practice_3p1.py
```python
from __future__ import print_function
from matplotlib.colors import hsv_to_rgb
from skimage.transform import resize
from skimage.feature import hog
from skimage import exposure
import numpy as np
import cv2 as cv
import math
import matplotlib.pyplot as plt
import imutils
import time
import re
from numpy import savetxt
def scale(img, scale_percent = 50):# percent of original size
width = int(img.shape[1] * scale_percent / 100)
height = int(img.shape[0] * scale_percent / 100)
dim = (width, height)
return cv.resize(img, dim, interpolation= cv.INTER_AREA)
def pyramid(image, scale=1.5, minSize=(30,30)):
yield image
while True:
w = int(image.shape[1]/scale)
image = imutils.resize(image, width=w)
if image.shape[0] < minSize[1] or image.shape[1] < minSize[0]:
break
yield image
def sliding_window(image, stepSize, windowSize):
# slide a window across the image
for y in range(0, image.shape[0], stepSize):
for x in range(0, image.shape[1], stepSize):
# yield the current window
yield (x, y, image[y:y + windowSize[1], x:x + windowSize[0]])
def HOG(crop_img, orient=9, pixels_per_cell=(8,8), cells_per_block=(2,2)):
return hog(crop_img, orientations=orient, pixels_per_cell=pixels_per_cell, cells_per_block=cells_per_block, visualize=True, multichannel=True)
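# Illustrative note: with a 64x128 window, 8x8-pixel cells, 2x2-cell blocks and 9
# orientations, the returned descriptor fd has length 3780
# (15 x 7 block positions x 2 x 2 cells x 9 orientation bins).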
def main():
image = cv.imread("../data/puzzle_part.jpg")
image = scale(image, 20)
print(image.shape)
'''
for (i, resized) in enumerate(pyramid(image)):
# show the resized image
cv.imshow("Layer {}".format(i + 1), resized)
cv.waitKey(0)
'''
f = open("../data/features_hog.csv", "w")
(winW, winH) = (64,128)
# loop over the image pyramid
for resized in pyramid(image, scale=2, minSize=(64,128)):
# loop over the sliding window for each layer of the pyramid
for (x, y, window) in sliding_window(resized, stepSize=32, windowSize=(winW, winH)):
# if the window does not meet our desired window size, ignore it
if window.shape[0] != winH or window.shape[1] != winW:
continue
#print("resized size {}".format(resized.shape))
#print("window size {}".format(window.shape))
fd, hog_image = HOG(window)
#print(window)
f.write(','.join(map(str,fd)))
f.write('\n')
#cv.imshow('window', window)
#cv.imshow('hog', hog_image)
#cv.waitKey(0)
# we'll just draw the window
#clone = resized.copy()
#cv.rectangle(clone, (x, y), (x + winW, y + winH), (0, 255, 0), 2)
#cv.imshow("Window", clone)
#cv.waitKey(1)
#time.sleep(0.025)
f.close()
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
```
#### File: samples/python/segmentation.py
```python
from __future__ import print_function
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
import random as rng
rng.seed(12345)
def scale(img, scale_percent = 50):# percent of original size
width = int(img.shape[1] * scale_percent / 100)
height = int(img.shape[0] * scale_percent / 100)
dim = (width, height)
return cv.resize(img, dim, interpolation= cv.INTER_AREA)
def binarization(img, thres = 127, otsu=False, inv=True):
gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
#cv.imshow('gray', gray)
if not otsu:
if inv:
ret1,thresh = cv.threshold(gray,thres,255,cv.THRESH_BINARY_INV)
else:
ret1,thresh = cv.threshold(gray,thres,255,cv.THRESH_BINARY)
return thresh
if inv:
ret, thresh = cv.threshold(gray,0,255,cv.THRESH_BINARY_INV+cv.THRESH_OTSU)
else:
ret, thresh = cv.threshold(gray,0,255,cv.THRESH_BINARY+cv.THRESH_OTSU)
return thresh
def erosion(img, size = 3, iter=1):
kernel = np.ones((size,size), np.uint8)
# The first parameter is the original image,
# kernel is the matrix with which image is
# convolved and third parameter is the number
# of iterations, which will determine how much
# you want to erode/dilate a given image.
img_erosion = cv.erode(img, kernel, iterations=iter)
return img_erosion
def dilation(img, size = 3, iter=1):
kernel = np.ones((size,size), np.uint8)
img_dilation = cv.dilate(img, kernel, iterations=iter)
return img_dilation
def closing(img, size =3 ,iter = 1):
for i_iter in range(iter):
img = erosion(dilation(img, size), size)
return img
def opening(img, size =3 ,iter = 1):
for i_iter in range(iter):
img = dilation(erosion(img, size), size)
return img
def main():
#image_path = '../data/puzzle_part_edit.jpg'
image_path = '../data/one_piece_puzzle.jpeg'
img = cv.imread(image_path)
print(img.shape)
img = scale(img, 20)
print(img.shape)
#cv.imshow('Scaled', img)
binary = binarization(img, 110)
#binary_n = binarization(img, 105, inv=False)
#cv.imshow('binary not inverted', binary_n)
binary = dilation(binary, size=6, iter=2)
#cv.imshow('binary', binary)
sure_bg = closing(binary, 4, iter=2)
#binary = dilation(binary, size=4)
cv.imshow('closing', binary)
dist = cv.distanceTransform(sure_bg,distanceType=cv.DIST_L2,maskSize=3)
cv.normalize(dist, dist, 0, 1.0, cv.NORM_MINMAX)
cv.imshow('dist', dist)
_, binary = cv.threshold(dist, 0.29, 1.0, cv.THRESH_BINARY)
cv.imshow('binary normalize', binary)
#binary = opening(binary)
sure_fg = erosion(binary, 4)
cv.imshow('opening', sure_fg)
# Finding unknown region
sure_fg = np.uint8(sure_fg)
unknown = cv.subtract(sure_bg,sure_fg)
# Marker labelling
ret, markers = cv.connectedComponents(sure_fg)
# Add one to all labels so that sure background is not 0, but 1
markers = markers+1
# Now, mark the region of unknown with zero
markers[unknown==255] = 0
#markers = np.uint8(markers)
#cv.imshow('markers', markers)
markers = cv.watershed(img,markers)
img[markers == -1] = [0,255,0]
cv.imshow('result',img)
'''
# Marker labelling
binary = np.uint8(binary)
contours, _= cv.findContours(binary, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
#print(binary)
#ret, markers = cv.connectedComponents(binary)
# Add one to all labels so that sure background is not 0, but 1
#print(markers[0][20])
#markers = markers+1
# Now, mark the region of unknown with zero
#markerrs[unknown==255] = 0
markers = cv.watershed(img,markers)
print(markers)
img[markers == -1] = [255,0,0]
cv.imshow('x', img)
'''
# Create the CV_8U version of the distance image
# It is needed for findContours()
#dist_8u = binary.astype('uint8')
# Find total markers
#contours, _= cv.findContours(dist_8u, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
#print(contours)
# Create the marker image for the watershed algorithm
#markers = np.zeros(binary.shape, dtype=np.int32)
# Draw the foreground markers
#for i in range(len(contours)):
# print(i)
# cv.drawContours(markers, contours, i, (i+1), -1)
# Draw the background marker
#cv.circle(markers, (5,5), 3, (255,0,0), -1)
#markers = cv.watershed(img, markers)
#img[markers == -1] = [0,255,0]
#print(markers)
#cv.imshow('x', img)
'''
#mark = np.zeros(markers.shape, dtype=np.uint8)
mark = markers.astype('uint8')
mark = cv.bitwise_not(mark)
# uncomment this if you want to see how the mark
# image looks like at that point
cv.imshow('Markers_v2', mark)
# Generate random colors
colors = []
for contour in contours:
colors.append((rng.randint(0,256), rng.randint(0,256), rng.randint(0,256)))
# Create the result image
dst = np.zeros((markers.shape[0], markers.shape[1], 3), dtype=np.uint8)
# Fill labeled objects with random colors
for i in range(markers.shape[0]):
for j in range(markers.shape[1]):
index = markers[i,j]
if index > 0 and index <= len(contours):
dst[i,j,:] = colors[index-1]
# Visualize the final image
cv.imshow('Final Result', dst)
#img[markers==-1] = [255,0,0]
#cv.imshow('Markers', markers)
'''
cv.waitKey()
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
```
|
{
"source": "jerogee/mdl-ling-chunks",
"score": 2
}
|
#### File: mdl-ling-chunks/src/est.py
```python
import os
import sys
import gzip
import logging
import tempfile
import psutil
from estimators import lzss
def mem_usage_percent():
process = psutil.Process(os.getpid())
return process.memory_percent()
def get_tmpfilename():
try:
return tempfile.mktemp(prefix=tempfile.template)
except TypeError:
return tempfile.mktemp()
def file_append(f, s):
with open(f, 'a') as fh:
fh.write(s)
def lzss_calculate_compression_ratio(f):
# Temporary filename for compressed version
f_c = get_tmpfilename()
# Get file handles of input and output files
fh_i = open(f, 'rb')
fh_o = open(f_c, 'wb')
# Compress, get # bytes, clean up
[bytes_i, bytes_o] = lzss.encode(fh_i, fh_o)
os.remove(f_c)
return bytes_i/bytes_o
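# Usage sketch (the filename below is an assumption): the ratio is input bytes divided
# by compressed bytes, so larger values mean the text is more compressible/redundant.
#     ratio = lzss_calculate_compression_ratio('corpus.txt')
#     print('compression ratio: %.3f' % ratio)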
def run_lzss_file(fn_i, fn_o):
# Create temporary file name
tfn = get_tmpfilename()
# Process corpus line by line and produce output
cnt_sen = 0
cnt_crs = 0
logging.info('loading [%s]', fn_i)
logging.info('writing [%s]', fn_o)
with open(fn_i, 'r') as fhi, open(fn_o, 'w') as fho:
# Write output file header
fho.write('nr\tlength\tratio\n')
# Iterate over input sentences
for sentence in fhi:
cnt_sen += 1
if cnt_sen % 50 == 0:
logging.info('... %d lines', cnt_sen)
# Strip POS tags
tokens = [word.split('|')[0] for word in sentence.split()]
# Append sentence w/o spaces
tokenstring = ''.join(tokens)
cnt_crs += len(tokenstring)
file_append(tfn, tokenstring)
# Get compression ratio and write output
cr = lzss_calculate_compression_ratio(tfn)
fho.write('%d\t%d\t%.8f\n' % (cnt_sen, cnt_crs, cr))
# Cleanup
os.remove(tfn)
# Some reporting
logging.info('%d lines processed', cnt_sen)
def run_lz77_mem(fn_i, fn_o):
# Process corpus line by line and produce output
cnt_sen = 0
txt_plain = ''
logging.info('loading [%s]', fn_i)
logging.info('writing [%s]', fn_o)
with open(fn_i, 'r') as fhi, open(fn_o, 'w') as fho:
# Write output file header
fho.write('nr\tlength\tratio\n')
# Iterate over input sentences
for sentence in fhi:
cnt_sen += 1
if cnt_sen % 50 == 0:
mem_usage = mem_usage_percent()
logging.info('... %d lines\tusing %.2f%% RAM', cnt_sen, mem_usage)
if mem_usage > 90:
logging.error('Insufficient RAM. QUITTING!')
exit(1)
# Strip POS tags
tokens = [word.split('|')[0] for word in sentence.split()]
# Append sentence w/o spaces, then add sentence with space sep
tokenstring = ''.join(tokens)
txt_plain += tokenstring + ' '
txt_compr = gzip.compress(str.encode(txt_plain))
# Get compression ratio and write output
cr = len(txt_plain) / len(txt_compr)
fho.write('%d\t%d\t%.8f\n' % (cnt_sen, len(txt_plain), cr))
# Some reporting
logging.info('%d lines processed', cnt_sen)
def main(argv):
if len(argv) < 2:
logging.error('Insufficient arguments')
exit(1)
# Run lz77 - in memory
run_lz77_mem(sys.argv[1],sys.argv[2])
if __name__ == '__main__':
logging.basicConfig( stream=sys.stderr,
level=logging.INFO,
format='%(asctime)s - %(message)s',
datefmt='%H:%M:%S')
main(sys.argv[1:])
```
|
{
"source": "jerojero/Requiem-BOT",
"score": 3
}
|
#### File: jerojero/Requiem-BOT/main.py
```python
import sys
import socket
import string
import topics
import random
import time
import threading
SERVER = "irc.rizon.net"
CHANNEL = "#" + sys.argv[1]
NICK = sys.argv[2]
OWNER = "dude1"
OWNERVHOST = "singing.a.sad.song"
# Your vhost is unique so this way only you (identified) may be able to part and join channels.
INTRO = "Every hour it gives you a literary topic to think about your meaningless life."
channelsJoined = [CHANNEL, ]
def join_channel(channel):
ircsock.send(bytes("JOIN "+ channel +"\n", "UTF-8"))
ircsock.send(bytes("PRIVMSG "+ channel +" :"+ INTRO +"\n", "UTF-8"))
ircsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ircsock.connect((SERVER, 6667))
ircsock.send(bytes("USER "+ NICK +" "+ NICK +" "+ NICK +" :connected\n", "UTF-8"))
ircsock.send(bytes("NICK "+ NICK +"\n", "UTF-8"))
# ircsock.send(bytes("NICKSERV :IDENTIFY %s\r\n" % password, "UTF-8"))
time.sleep(2)
join_channel(CHANNEL)
# Bot functions
def send_to_channel_normal(message, channel):
ircsock.send(bytes("PRIVMSG "+ channel +" :"+ message +"\n", "UTF-8"))
def send_to_channel_bold(message, channel):
ircsock.send(bytes("PRIVMSG "+ channel +" :""\u0002"+message +"\n", "UTF-8"))
def send_to_channel_cursive(message, channel):
ircsock.send(bytes("PRIVMSG "+ channel +" :""\u001D"+message +"\n", "UTF-8"))
def part(channel):
ircsock.send(bytes("PART "+ channel +"\n", "UTF-8"))
def get_time(delay):
timeNow = [time.strftime("%H"), time.strftime("%M"), time.strftime("%S")]
time.sleep(delay)
return(timeNow)
def timer():
while True:
currentTime = get_time(30)
if(currentTime[1] == "00"):
topicName = random.choice(list(topics.topics.keys()))
topicTran = topics.topics[topicName][0]
topicDesc = topics.topics[topicName][1]
for channel in channelsJoined:
send_to_channel_bold(topicName, channel)
send_to_channel_cursive(topicTran, channel)
send_to_channel_normal(topicDesc, channel)
time.sleep(60)
def ircBuffer():
readbuffer = ""
while True:
readbuffer = readbuffer+ircsock.recv(1024).decode("UTF-8")
temp = str.split(readbuffer, "\n")
        readbuffer = temp.pop()
try:
for line in temp:
line = str.rstrip(line)
line = str.split(line)
print(line)
channel = ""
try:
for char in line[2]:
if(char != ":"):
channel += char
except:
pass
if(line[0] == "PING"):
ircsock.send(bytes("PONG %s\r\n" % line[1], "UTF-8"))
if(line[3] == ":!!topic"):
topicName = random.choice(list(topics.topics.keys()))
topicTran = topics.topics[topicName][0]
topicDesc = topics.topics[topicName][1]
send_to_channel_bold(topicName, channel)
send_to_channel_cursive(topicTran, channel)
send_to_channel_normal(topicDesc, channel)
if(line[3] in (":!!" + word for word in topics.first_word)):
                    # The message starts with "!!"; check whether its first word matches the first word of a known topic listed in topics.first_word
size = len(line)
# some topics are longer so this makes sure you get all the words
index = 3
# starts at the fourth element, first element (0) is the username, second (1) is PRIVMSG, third (2) is the channel and fourth is the first word (:!!firstword)
phrase = ""
while (index < size):
phrase += line[index] + " "
index += 1
phrase = phrase.replace(":!!","")
phrase = phrase.capitalize()
phrase = phrase.rstrip()
sender = ""
for char in line[0]:
if(char == "!"):
break
if(char != ":"):
sender += char
if(sender != NICK):
topicName = phrase
topicTran = topics.topics[topicName][0]
topicDesc = topics.topics[topicName][1]
send_to_channel_bold(topicName, channel)
send_to_channel_cursive(topicTran, channel)
send_to_channel_normal(topicDesc, channel)
if(line[3] == ":!!help"):
message = "Every hour it gives a random phrase so you think about your meaningless life, do !!topic to get one right this moment. Also you can do !!name of topic to get an explanation on that one (ej: !!carpe diem)."
send_to_channel_normal(message, channel)
if(line[3] == ":!!quit") and (line[4] == NICK):
sender = line[0].split("@")[1]
if(sender == OWNERVHOST):
part(channel)
                if(line[3] == ":!!join") and (line[4] == NICK):
                    sender = line[0].split("@")[1]
                    if(sender == OWNERVHOST):
                        # Expected usage: "!!join <botnick> <#channel>"; the
                        # channel is the fifth token, not the bot's own nick.
                        channelToJoin = line[5]
                        join_channel(channelToJoin)
                        channelsJoined.append(channelToJoin)
except:
continue
t1 = threading.Thread(target = timer)
t2 = threading.Thread(target = ircBuffer)
t1.start()
t2.start()
```
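The bot imports a `topics` module that is not shown here. Below is a hedged sketch of its assumed shape, reconstructed from how `main.py` uses it: `topics.topics` maps a topic name to a `[translation, description]` pair, and `topics.first_word` lists lower-case first words so multi-word commands such as `!!carpe diem` can be recognised.
```python
# Hypothetical topics.py, inferred from usage in main.py above.
topics = {
    "Carpe diem": [
        "Seize the day",
        "Make the most of the present moment instead of waiting.",
    ],
    "Memento mori": [
        "Remember that you must die",
        "A reminder that life is finite, so spend it deliberately.",
    ],
}
first_word = [name.split()[0].lower() for name in topics]
```
With such a module in place, the bot is started as `python main.py <channel-without-#> <nick>`, matching the `sys.argv` reads at the top of the file.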
|
{
"source": "Jerold25/DarkWeb-Crawling-Indexing",
"score": 3
}
|
#### File: Code/crawler/crawl_bot.py
```python
from get_domains import *
from file_manage import *
from link_finder import link_crawler
from urllib.request import urlopen
import tldextract
#Importing Stem libraries
from stem import Signal
from stem.control import Controller
import socks, socket
#Initiating Connection
with Controller.from_port(port=9051) as controller:
controller.authenticate("insert-your-key")
controller.signal(Signal.NEWNYM)
# TOR SETUP GLOBAL Vars
SOCKS_PORT = 9050 # TOR proxy port that is default from torrc, change to whatever torrc is configured to
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", SOCKS_PORT)
socket.socket = socks.socksocket
# Perform DNS resolution through the socket
def getaddrinfo(*args):
return [(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))]
socket.getaddrinfo = getaddrinfo
class Crawl_bot:
folder_name, start_link, domain_name, queued_data, crawled_data = '', '', '', '', ''
queue = set()
data_crawled = set()
def __init__(self, folder_name, start_link, domain_name):
Crawl_bot.folder_name = folder_name
Crawl_bot.start_link = start_link
Crawl_bot.domain_name = domain_name
Crawl_bot.queued_data = Crawl_bot.folder_name + '/queue.txt'
Crawl_bot.crawled_data = Crawl_bot.folder_name + '/crawled.txt'
self.initiate_directory()
self.crawl_page('Spider starts here', Crawl_bot.start_link)
@staticmethod
def initiate_directory(): # Define and create new directory on the first run
create_project_folder(Crawl_bot.folder_name)
create_data_files(Crawl_bot.folder_name, Crawl_bot.start_link)
Crawl_bot.queue = convert_to_set(Crawl_bot.queued_data)
Crawl_bot.data_crawled = convert_to_set(Crawl_bot.crawled_data)
@staticmethod
def crawl_page(thread_name, web_url): # Fill queue and then update files, also updating user display
print(web_url)
if web_url not in Crawl_bot.data_crawled:
print(thread_name + ' now crawl starts ' + web_url)
print('Queue_url ' + str(len(Crawl_bot.queue)) + ' | Crawled_url ' + str(len(Crawl_bot.data_crawled)))
Crawl_bot.add_url_to_queue(Crawl_bot.collect_url(web_url))
Crawl_bot.queue.remove(web_url)
Crawl_bot.data_crawled.add(web_url)
Crawl_bot.update_folder()
# Converts raw response data into readable information and checks for proper html formatting
@staticmethod
def collect_url(web_url):
html_data_string = ''
try:
received_response = urlopen(web_url)
if 'text/html' in received_response.getheader('Content-Type'):
data_bytes = received_response.read()
html_data_string = data_bytes.decode("latin-1")
link_finder = link_crawler(Crawl_bot.start_link, web_url)
link_finder.feed(html_data_string)
            # Dump the raw HTML to a per-domain file for later scraping
            f = open(Crawl_bot.folder_name + '/' + ((tldextract.extract(web_url)).domain), 'a')
            f.write(html_data_string + "\n\n\n" + '#####EOF#####' + "\n\n\n")
            f.close()
except Exception as e:
print(str(e))
return set()
return link_finder.page_urls()
@staticmethod
def add_url_to_queue(links): # Queue data saves to project files
for url in links:
if (url in Crawl_bot.queue) or (url in Crawl_bot.data_crawled):
continue
Crawl_bot.queue.add(url)
@staticmethod
def update_folder(): # Update the project directory
set_to_file(Crawl_bot.queue, Crawl_bot.queued_data)
set_to_file(Crawl_bot.data_crawled, Crawl_bot.crawled_data)
```
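A minimal entry-point sketch for the class above; the folder name and the onion URL are placeholders, and Tor must already be listening on ports 9050/9051 as configured at the top of the file.
```python
# Hypothetical driver: crawl a single hidden service through the Tor proxy.
if __name__ == '__main__':
    FOLDER_NAME = 'crawl_output'                      # placeholder
    START_URL = 'http://exampleonion1234567.onion/'   # placeholder
    DOMAIN_NAME = 'exampleonion1234567.onion'         # placeholder
    Crawl_bot(FOLDER_NAME, START_URL, DOMAIN_NAME)
```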
|
{
"source": "Jerold25/textanalysis_yelp",
"score": 2
}
|
#### File: textanalysis_yelp/backend/app.py
```python
import string
import time
import bson
from collections import OrderedDict
import datetime
from bson.json_util import dumps, loads
from flask import Flask, redirect, url_for
from flask import jsonify
from flask import request, make_response
from flask_pymongo import PyMongo
from flask_cors import CORS
from werkzeug.contrib.cache import SimpleCache
app = Flask(__name__)
cors = CORS(app)
cache = SimpleCache()
app.config['MONGO_DBNAME'] = 'yelp-db'
app.config['MONGO_URI'] = 'mongodb://localhost:27017/yelp-db'
mongo = PyMongo(app)
from escapejson import escapejson
@app.route('/get-data', methods=['GET'])
def get_resturant_info():
res = []
try:
mile = 3963
# Retrieve 1000 businesses from the center of Tempe -> nearest to the farthest
x = mongo.db.business.find({"location": {"$nearSphere": [-111.9400, 33.4255], "$minDistance": 0 / mile}}).limit(
1000)
for i in x:
temp = {'type': "Feature"}
props = {'city': i['city'], 'review_count': i['review_count'], 'name': i['name'],
'business_id': i['business_id'], 'hours': i['hours'], 'state': i['state'],
'postal_code': i['postal_code'], 'stars': i['stars'], 'address': i['address'],
'is_open': i['is_open'], 'attributes': i['attributes'], 'categories': i['categories']}
temp['properties'] = props
geo = {'type': 'Point', 'coordinates': i['location']}
temp['geometry'] = geo
res.append(temp)
except Exception as ex:
print(ex)
return make_response(dumps(res))
@app.route('/', methods=['GET'])
def start():
return redirect(url_for('get_all_details'))
def get_liveliness(business_id):
obj = mongo.db.checkin.find_one({"business_id": business_id})
date_str = obj['date']
dates = date_str.split(', ')
data = []
heat_map = [[0 for i in range(24)] for j in range(7)]
date_map = {0: set(), 1: set(), 2: set(), 3: set(), 4: set(), 5: set(), 6: set()}
for i in dates:
date, time = i.split(' ')
day = int(datetime.datetime.strptime(date, '%Y-%m-%d').strftime('%w'))
date_map[day].add(date)
hour = int(time.split(':')[0])
heat_map[day][hour] += 1
for i in range(7):
for j in range(24):
x = len(date_map[i])
if x>1:
heat_map[i][j] /= x
data.append(OrderedDict([('day', i), ('hour', j), ('value', heat_map[i][j])]))
return data
@app.route('/all-details', methods=['GET'])
def get_all_details():
# args = request.args
# business_id = args["business-id"]
business_id = "mKf7pIkOYpEhJTqjw4_Fxg"
response = cache.get(business_id)
if not response:
word_cloud_data = get_word_cloud_trend(business_id)
rating_trend_data = get_rating_trend(business_id)
sentiment_trend_data = get_sentiment_trend(business_id)
liveliness = get_liveliness(business_id)
combined_trend = get_combined_trend(business_id)
response = {"ratingTrend": rating_trend_data, "sentimentTrend": sentiment_trend_data,
"liveliness": liveliness,
"wordCloudData":
word_cloud_data, "combinedTrends": combined_trend}
cache.set(business_id, response, timeout=15 * 60)
return make_response(escapejson(dumps(response)))
# @app.route('/rating_trend', methods=['GET'])
def get_rating_trend(business_id):
cursor = mongo.db.review.aggregate(
[
{"$match": {"business_id": business_id}},
{'$project': {'part_date': {'$substr': ['$date', 0, 7]}, 'stars': '$stars', 'business_id': '$business_id'}},
{"$group": {"_id": '$part_date', "avgRating": {"$avg": '$stars'}}},
{"$sort": {"_id": 1}}
]
)
data = []
c = 0
cum = 0
for i in cursor:
val = (cum * c) + i['avgRating']
c = c + 1
cum = val / c
data.append({'date': i['_id'], 'count': cum})
return data
def get_sentiment_trend(business_id):
cursor = mongo.db.sentiment_info.aggregate(
[
{"$match": {"business_id": business_id}},
{'$project': {'part_date': {'$substr': ['$date', 0, 7]},
'pos_sentiment': {'$arrayElemAt': ["$sentiment", 1]}, 'business_id': 1}},
{"$group": {"_id": '$part_date', "avgSenti": {"$avg": '$pos_sentiment'}}},
{"$sort": {"_id": 1}}
]
)
data = []
c = 0
cum = 0
for i in cursor:
val = (cum * c) + i['avgSenti']
c = c + 1
cum = val / c
data.append({'date': i['_id'], 'count': cum})
return data
def get_combined_trend(business_id):
#SENTIMENTS
cursor = mongo.db.sentiment_info.aggregate(
[
{"$match": {"business_id": business_id}},
{'$project': {'part_date': {'$substr': ['$date', 0, 10]},
'pos_sentiment': {'$arrayElemAt': ["$sentiment", 1]}, 'business_id': 1}},
{"$group": {"_id": '$part_date', "avgSenti": {"$avg": '$pos_sentiment'}}},
{"$sort": {"_id": 1}}
]
)
sentiments = {}
c = 0
cum = 0
for i in cursor:
val = (cum * c) + i['avgSenti']
c = c + 1
cum = val / c
dat = i['_id']
date_time_obj = time.strptime(dat, "%Y-%m-%d")
epoch = int(time.mktime(date_time_obj))
sentiments[epoch] = cum
#RATINGS
cursor = mongo.db.review.aggregate(
[
{"$match": {"business_id": business_id}},
{'$project': {'part_date': {'$substr': ['$date', 0, 10]}, 'stars': '$stars',
'business_id': '$business_id'}},
{"$group": {"_id": '$part_date', "avgRating": {"$avg": '$stars'}}},
{"$sort": {"_id": 1}}
]
)
rating = {}
c = 0
cum = 0
for i in cursor:
val = (cum * c) + i['avgRating']
c = c + 1
cum = val / c
dat = i['_id']
date_time_obj = time.strptime(dat, "%Y-%m-%d")
epoch = int(time.mktime(date_time_obj))
rating[epoch] = cum
trend = []
for key, val in rating.items():
senti = 0
if key in sentiments:
senti = sentiments[key] * 5
temp = {'date': key, 'ratings': val, 'sentiment': senti}
trend.append(temp)
return trend
# @app.route('/word-cloud-trend', methods=['GET'])
def get_word_cloud_trend(business_id):
# args = request.args
# restuarant_id = args["resturant-id"]
word_value_dict = dict()
word_reviews_dict = dict()
for review in mongo.db.food_items_info.find({"business_id": business_id}):
for word in review["food_items"]:
word = escape_keys(word)
if word not in word_value_dict:
word_value_dict[word] = 0
if word not in word_reviews_dict:
word_reviews_dict[word] = []
word_value_dict[word] = word_value_dict[word] + review["stars"]
if len(word_reviews_dict[word]) < 10:
word_reviews_dict[word].append({"text": escape_quotes(review["text"]), "stars": review["stars"], "date": review["date"]})
# words = []
# for word, value in word_value_dict.items():
# words.append({"text": word, "weight": value})
sample_title = {}
for word, count in word_value_dict.items():
sample_title[word] = "Got {} stars!".format(count)
response = {"count": word_value_dict, "sample_title": sample_title, "word_reviews":word_reviews_dict }
return response
def remove_punctuations(text):
return str(text).translate(str.maketrans('', '', string.punctuation))
def escape_keys(text):
text = text.lower()
return remove_punctuations(text)
def escape_quotes(text):
    # Escape double quotes so review text can be embedded safely in JSON strings
    return text.replace('"', '\\"')
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True, port=5001)
```
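A small client-side check, assuming the service above is running locally on port 5001 with a populated `yelp-db` MongoDB instance; the `requests` package is used only for illustration.
```python
import requests
# GeoJSON-style features for businesses around Tempe
features = requests.get('http://localhost:5001/get-data').json()
print(len(features), 'businesses returned')
# Aggregated trends for the (currently hard-coded) business id
details = requests.get('http://localhost:5001/all-details').json()
print(sorted(details.keys()))  # liveliness, ratingTrend, sentimentTrend, ...
```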
|
{
"source": "jeroldalbertson-wf/rick_roller",
"score": 3
}
|
#### File: rick_roller/src/models.py
```python
import json
from google.appengine.ext import ndb
class RickRoll(ndb.Model):
ip = ndb.StringProperty()
def get_rick_rolls_list():
rick_rolls = RickRoll.query().fetch()
if not rick_rolls:
rick_rolls = []
return rick_rolls
```
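A minimal usage sketch, assuming it runs inside an App Engine request handler (the datastore is only available in that context); the IP address is a placeholder.
```python
# Record one rick-roll and count how many have been stored so far.
RickRoll(ip='192.0.2.1').put()
print(len(get_rick_rolls_list()))
```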
|
{
"source": "JeroldLeo/ursina",
"score": 4
}
|
#### File: ursina/samples/clicker_game.py
```python
from ursina import *
app = Ursina()
window.color = color._20
gold = 0
counter = Text(text='0', y=.25, z=-1, scale=2, origin=(0, 0), background=True)
button = Button(text='+', color=color.azure, scale=.125)
def button_click():
global gold
gold += 1
counter.text = str(gold)
button.on_click = button_click
button_2 = Button(cost=10, x=.2, scale=.125, color=color.dark_gray, disabled=True)
button_2.tooltip = Tooltip(f'<gold>Gold Generator\n<default>Earn 1 gold every second.\nCosts {button_2.cost} gold.')
def buy_auto_gold():
global gold
if gold >= button_2.cost:
gold -= button_2.cost
counter.text = str(gold)
invoke(auto_generate_gold, 1, 1)
button_2.on_click = buy_auto_gold
def auto_generate_gold(value=1, interval=1):
global gold
gold += 1
counter.text = str(gold)
button_2.animate_scale(.125 * 1.1, duration=.1)
button_2.animate_scale(.125, duration=.1, delay=.1)
invoke(auto_generate_gold, value, delay=interval)
def update():
global gold
for b in (button_2,):
if gold >= b.cost:
b.disabled = False
b.color = color.green
else:
b.disabled = True
b.color = color.gray
app.run()
```
#### File: ursina/samples/minecraft_clone.py
```python
from ursina import *
from ursina.prefabs.first_person_controller import FirstPersonController
app = Ursina()
# Define a Voxel class.
# By setting the parent to scene and the model to 'cube' it becomes a 3d button.
class Voxel(Button):
def __init__(self, position=(0, 0, 0)):
super().__init__(
parent=scene,
position=position,
model='cube',
origin_y=.5,
texture='white_cube',
color=color.color(0, 0, random.uniform(.9, 1.0)),
highlight_color=color.lime,
)
def input(self, key):
if self.hovered:
if key == 'left mouse down':
voxel = Voxel(position=self.position + mouse.normal)
if key == 'right mouse down':
destroy(self)
for z in range(8):
for x in range(8):
voxel = Voxel(position=(x, 0, z))
player = FirstPersonController()
app.run()
```
|
{
"source": "jeromaerts/ESMValTool",
"score": 2
}
|
#### File: diag_scripts/emergent_constraints/cox18nature.py
```python
import logging
import os
import iris
import matplotlib.lines as mlines
import matplotlib.pyplot as plt
import numpy as np
import esmvaltool.diag_scripts.emergent_constraints as ec
import esmvaltool.diag_scripts.shared.iris_helpers as ih
from esmvaltool.diag_scripts.shared import (
ProvenanceLogger, get_diagnostic_filename, get_plot_filename,
group_metadata, io, plot, run_diagnostic, select_metadata)
logger = logging.getLogger(os.path.basename(__file__))
plt.style.use(plot.get_path_to_mpl_style())
COLOR_SMALL_LAMBDA = '#800060'
COLOR_LARGE_LAMBDA = '#009900'
(FIG, AXES) = plt.subplots()
ECS_ATTRS = {
'short_name': 'ecs',
'long_name': 'Effective Climate Sensitivity (ECS)',
'units': 'K',
}
TASA_ATTRS = {
'short_name': 'tasa',
'long_name': 'Near-Surface Air Temperature Anomaly',
'units': 'K',
}
PSI_ATTRS = {
'short_name': 'psi',
'long_name': 'Temperature variability metric',
'units': 'K',
}
def _get_ancestor_files(cfg, obs_name, projects=None):
"""Get ancestor files for provenance."""
if projects is None:
projects = _get_project(cfg)
if isinstance(projects, str):
projects = [projects]
datasets = []
for project in projects:
datasets.extend(
select_metadata(cfg['input_data'].values(), project=project))
datasets.extend(
select_metadata(cfg['input_data'].values(), dataset=obs_name))
return [d['filename'] for d in datasets]
def _get_model_color(model, lambda_cube):
"""Get color of model dependent on climate feedback parameter."""
clim_sens = lambda_cube.extract(iris.Constraint(dataset=model)).data
if clim_sens < 1.0:
col = COLOR_SMALL_LAMBDA
else:
col = COLOR_LARGE_LAMBDA
return col
def _plot_model_point(model, psi_cube, ecs_cube, lambda_cube):
"""Plot a single model point for emergent relationship."""
col = _get_model_color(model, lambda_cube)
style = plot.get_dataset_style(model, 'cox18nature')
AXES.plot(
psi_cube.extract(iris.Constraint(dataset=model)).data,
ecs_cube.extract(iris.Constraint(dataset=model)).data,
linestyle='none',
marker=style['mark'],
markeredgecolor=col,
markerfacecolor=col,
markersize=style['size'])
def _get_line_plot_legend():
"""Add legend for line plots."""
color_obs = plot.get_dataset_style('OBS', 'cox18nature')['color']
handles = [
mlines.Line2D([], [],
color=COLOR_SMALL_LAMBDA,
label=r'$\lambda < 1.0$ Wm$^{-2}$K$^{-1}$'),
mlines.Line2D([], [],
color=COLOR_LARGE_LAMBDA,
label=r'$\lambda > 1.0$ Wm$^{-2}$K$^{-1}$'),
mlines.Line2D([], [],
linestyle='none',
marker='o',
markeredgecolor=color_obs,
markerfacecolor=color_obs,
label='Observations'),
]
return AXES.legend(handles=handles, loc='upper left')
def _get_project(cfg):
"""Extract project from cfg."""
input_data = cfg['input_data'].values()
projects = list(group_metadata(input_data, 'project').keys())
projects = [p for p in projects if 'obs' not in p.lower()]
if len(projects) == 1:
return projects[0]
return projects
def _save_fig(cfg, basename, legend=None):
"""Save matplotlib figure."""
path = get_plot_filename(basename, cfg)
if legend is None:
legend = []
else:
legend = [legend]
FIG.savefig(
path,
additional_artists=legend,
bbox_inches='tight',
orientation='landscape')
logger.info("Wrote %s", path)
AXES.cla()
return path
def get_external_cubes(cfg):
"""Get external cubes for psi, ECS and lambda."""
cubes = iris.cube.CubeList()
for filename in ('psi.nc', 'ecs.nc', 'lambda.nc'):
filepath = io.get_ancestor_file(cfg, filename)
cube = iris.load_cube(filepath)
cube = cube.extract(
ih.iris_project_constraint(['OBS'], cfg, negate=True))
cubes.append(cube)
cubes = ih.intersect_dataset_coordinates(cubes)
return (cubes[0], cubes[1], cubes[2])
def get_provenance_record(caption, statistics, plot_type, ancestor_files):
"""Create a provenance record describing the diagnostic data and plot."""
record = {
'ancestors': ancestor_files,
'authors': ['schlund_manuel'],
'caption': caption,
'domains': ['global'],
'plot_type': plot_type,
'realms': ['atmos'],
'references': ['cox18nature'],
'statistics': statistics,
'themes': ['EC'],
}
return record
def plot_temperature_anomaly(cfg, tas_cubes, lambda_cube, obs_name):
"""Plot temperature anomaly versus time."""
for cube in tas_cubes.values():
cube.data -= np.mean(
cube.extract(
iris.Constraint(year=lambda cell: 1961 <= cell <= 1990)).data)
# Save netcdf file and provencance
filename = 'temperature_anomaly_{}'.format(obs_name)
netcdf_path = get_diagnostic_filename(filename, cfg)
io.save_1d_data(tas_cubes, netcdf_path, 'year', TASA_ATTRS)
project = _get_project(cfg)
provenance_record = get_provenance_record(
"Simulated change in global temperature from {} models (coloured "
"lines), compared to the global temperature anomaly from the {} "
"dataset (black dots). The anomalies are relative to a baseline "
"period of 1961-1990.".format(project, obs_name), ['anomaly'],
['times'], _get_ancestor_files(cfg, obs_name))
# Plot
if cfg['write_plots']:
models = lambda_cube.coord('dataset').points
# Plot lines
for model in models:
cube = tas_cubes[model]
AXES.plot(
cube.coord('year').points,
cube.data,
color=_get_model_color(model, lambda_cube))
obs_style = plot.get_dataset_style('OBS', 'cox18nature')
obs_cube = tas_cubes[obs_name]
AXES.plot(
obs_cube.coord('year').points,
obs_cube.data,
linestyle='none',
marker='o',
markeredgecolor=obs_style['color'],
markerfacecolor=obs_style['color'])
# Plot appearance
AXES.set_title('Simulation of global warming record')
AXES.set_xlabel('Year')
AXES.set_ylabel('Temperature anomaly / K')
legend = _get_line_plot_legend()
# Save plot
provenance_record['plot_file'] = _save_fig(cfg, filename, legend)
# Write provenance
with ProvenanceLogger(cfg) as provenance_logger:
provenance_logger.log(netcdf_path, provenance_record)
def plot_psi(cfg, psi_cubes, lambda_cube, obs_name):
"""Plot temperature variability metric psi versus time."""
filename = 'temperature_variability_metric_{}'.format(obs_name)
netcdf_path = get_diagnostic_filename(filename, cfg)
io.save_1d_data(psi_cubes, netcdf_path, 'year', PSI_ATTRS)
project = _get_project(cfg)
provenance_record = get_provenance_record(
"Psi metric of variability versus time, from the {0} models "
"(coloured lines), and the {1} observational data (black circles). "
"The psi values are calculated for windows of width {2} yr, after "
"linear de-trending in each window. These {2}-yr windows are shown "
"for different end times.".format(project, obs_name,
cfg.get('window_length', 55)),
['corr', 'var'], ['times'], _get_ancestor_files(cfg, obs_name))
# Plot
if cfg['write_plots']:
models = lambda_cube.coord('dataset').points
# Plot lines
for model in models:
cube = psi_cubes[model]
AXES.plot(
cube.coord('year').points,
cube.data,
color=_get_model_color(model, lambda_cube))
obs_style = plot.get_dataset_style('OBS', 'cox18nature')
obs_cube = psi_cubes[obs_name]
AXES.plot(
obs_cube.coord('year').points,
obs_cube.data,
linestyle='none',
marker='o',
markeredgecolor=obs_style['color'],
markerfacecolor=obs_style['color'])
# Plot appearance
AXES.set_title('Metric of variability versus time')
AXES.set_xlabel('Year')
AXES.set_ylabel(r'$\Psi$ / K')
legend = _get_line_plot_legend()
# Save plot
provenance_record['plot_file'] = _save_fig(cfg, filename, legend)
# Write provenance
with ProvenanceLogger(cfg) as provenance_logger:
provenance_logger.log(netcdf_path, provenance_record)
def plot_emergent_relationship(cfg, psi_cube, ecs_cube, lambda_cube, obs_cube):
"""Plot emergent relationship."""
filename = 'emergent_relationship_{}'.format(
obs_cube.attributes['dataset'])
cube = ecs_cube.copy()
cube.add_aux_coord(
iris.coords.AuxCoord(psi_cube.data, **ih.convert_to_iris(PSI_ATTRS)),
0)
netcdf_path = get_diagnostic_filename(filename, cfg)
io.iris_save(cube, netcdf_path)
provenance_record = get_provenance_record(
"Emergent relationship between ECS and the psi metric. The black dot-"
"dashed line shows the best-fit linear regression across the model "
"ensemble, with the prediction error for the fit given by the black "
"dashed lines. The vertical blue lines show the observational "
"constraint from the {} observations: the mean (dot-dashed line) and "
"the mean plus and minus one standard deviation (dashed lines).".
format(obs_cube.attributes['dataset']), ['mean', 'corr', 'var'],
['scatter'], _get_ancestor_files(cfg, obs_cube.attributes['dataset']))
# Plot
if cfg['write_plots']:
obs_mean = np.mean(obs_cube.data)
obs_std = np.std(obs_cube.data)
# Calculate regression line
lines = ec.regression_line(psi_cube.data, ecs_cube.data)
logger.info("Found emergent relationship with slope %.2f (r = %.2f)",
lines['slope'], lines['rvalue'])
# Plot points
for model in psi_cube.coord('dataset').points:
_plot_model_point(model, psi_cube, ecs_cube, lambda_cube)
# Plot lines
AXES.set_xlim(auto=False)
AXES.set_ylim(auto=False)
AXES.plot(
lines['x'],
lines['y_best_estim'],
color='black',
linestyle='dashdot',
label='Linear regression')
AXES.plot(
lines['x'],
lines['y_minus_err'],
color='black',
linestyle='dashed')
AXES.plot(
lines['x'], lines['y_plus_err'], color='black', linestyle='dashed')
AXES.axvline(
obs_mean,
color='blue',
linestyle='dashdot',
label='Observational constraint')
AXES.axvline(obs_mean - obs_std, color='blue', linestyle='dashed')
AXES.axvline(obs_mean + obs_std, color='blue', linestyle='dashed')
# Plot appearance
AXES.set_title('Emergent relationship fit')
AXES.set_xlabel(r'$\Psi$ / K')
AXES.set_ylabel('ECS / K')
legend = AXES.legend(loc='upper left')
# Save plot
provenance_record['plot_file'] = _save_fig(cfg, filename, legend)
# Write provenance
with ProvenanceLogger(cfg) as provenance_logger:
provenance_logger.log(netcdf_path, provenance_record)
def plot_pdf(cfg, psi_cube, ecs_cube, obs_cube):
"""Plot probability density function of ECS."""
obs_mean = np.mean(obs_cube.data)
obs_std = np.std(obs_cube.data)
(ecs_lin, ecs_pdf) = ec.gaussian_pdf(psi_cube.data, ecs_cube.data,
obs_mean, obs_std)
# Provenance
filename = 'pdf_{}'.format(obs_cube.attributes['dataset'])
netcdf_path = get_diagnostic_filename(filename, cfg)
cube = iris.cube.Cube(
ecs_pdf,
var_name='pdf',
long_name='Probability density function',
units='K-1')
cube.add_aux_coord(
iris.coords.AuxCoord(ecs_lin, **ih.convert_to_iris(ECS_ATTRS)), 0)
io.iris_save(cube, netcdf_path)
project = _get_project(cfg)
provenance_record = get_provenance_record(
"The PDF for ECS. The orange histograms show the prior distributions "
"that arise from equal weighting of the {} models in 0.5 K bins.".
format(project), ['mean'], ['other'],
_get_ancestor_files(cfg, obs_cube.attributes['dataset']))
# Plot
if cfg['write_plots']:
AXES.plot(
ecs_lin,
ecs_pdf,
color='black',
linewidth=2.0,
label='Emergent constraint')
AXES.hist(
ecs_cube.data,
bins=6,
range=(2.0, 5.0),
density=True,
color='orange',
label='{} models'.format(project))
# Plot appearance
AXES.set_title('PDF of emergent constraint')
AXES.set_xlabel('ECS / K')
AXES.set_ylabel('Probability density')
legend = AXES.legend(loc='upper left')
# Save plot
provenance_record['plot_file'] = _save_fig(cfg, filename, legend)
# Write provenance
with ProvenanceLogger(cfg) as provenance_logger:
provenance_logger.log(netcdf_path, provenance_record)
def plot_cdf(cfg, psi_cube, ecs_cube, obs_cube):
"""Plot cumulative distribution function of ECS."""
confidence_level = cfg.get('confidence_level', 0.66)
(ecs_lin, ecs_pdf) = ec.gaussian_pdf(psi_cube.data, ecs_cube.data,
np.mean(obs_cube.data),
np.std(obs_cube.data))
ecs_cdf = ec.cdf(ecs_lin, ecs_pdf)
# Provenance
filename = 'cdf_{}'.format(obs_cube.attributes['dataset'])
netcdf_path = get_diagnostic_filename(filename, cfg)
cube = iris.cube.Cube(
ecs_cdf,
var_name='cdf',
long_name='Cumulative distribution function',
units='1')
cube.add_aux_coord(
iris.coords.AuxCoord(ecs_lin, **ih.convert_to_iris(ECS_ATTRS)), 0)
io.iris_save(cube, netcdf_path)
project = _get_project(cfg)
provenance_record = get_provenance_record(
"The CDF for ECS. The horizontal dot-dashed lines show the {}% "
"confidence limits. The orange histograms show the prior "
"distributions that arise from equal weighting of the {} models in "
"0.5 K bins.".format(int(confidence_level * 100), project), ['mean'],
['other'], _get_ancestor_files(cfg, obs_cube.attributes['dataset']))
# Plot
if cfg['write_plots']:
AXES.plot(
ecs_lin,
ecs_cdf,
color='black',
linewidth=2.0,
label='Emergent constraint')
AXES.hist(
ecs_cube.data,
bins=6,
range=(2.0, 5.0),
cumulative=True,
density=True,
color='orange',
label='{} models'.format(project))
AXES.axhline(
(1.0 - confidence_level) / 2.0, color='black', linestyle='dashdot')
AXES.axhline(
(1.0 + confidence_level) / 2.0, color='black', linestyle='dashdot')
# Plot appearance
AXES.set_title('CDF of emergent constraint')
AXES.set_xlabel('ECS / K')
AXES.set_ylabel('CDF')
legend = AXES.legend(loc='upper left')
# Save plot
provenance_record['plot_file'] = _save_fig(cfg, filename, legend)
# Write provenance
with ProvenanceLogger(cfg) as provenance_logger:
provenance_logger.log(netcdf_path, provenance_record)
def get_ecs_range(cfg, psi_cube, ecs_cube, obs_cube):
"""Get constrained ecs range."""
confidence_level = cfg.get('confidence_level', 0.66)
conf_low = (1.0 - confidence_level) / 2.0
conf_high = (1.0 + confidence_level) / 2.0
# Calculate PDF and CDF
(ecs_lin, ecs_pdf) = ec.gaussian_pdf(psi_cube.data, ecs_cube.data,
np.mean(obs_cube.data),
np.std(obs_cube.data))
ecs_cdf = ec.cdf(ecs_lin, ecs_pdf)
# Calculate constrained ECS range
ecs_mean = ecs_lin[np.argmax(ecs_pdf)]
ecs_index_range = np.where((ecs_cdf >= conf_low) &
(ecs_cdf <= conf_high))[0]
ecs_range = ecs_lin[ecs_index_range]
ecs_low = min(ecs_range)
ecs_high = max(ecs_range)
return (ecs_mean, ecs_low, ecs_high)
def main(cfg):
"""Run the diagnostic."""
input_data = (
select_metadata(cfg['input_data'].values(), short_name='tas') +
select_metadata(cfg['input_data'].values(), short_name='tasa'))
if not input_data:
raise ValueError("This diagnostics needs 'tas' or 'tasa' variable")
# Get tas data
tas_cubes = {}
tas_obs = []
for (dataset, [data]) in group_metadata(input_data, 'dataset').items():
cube = iris.load_cube(data['filename'])
cube = cube.aggregated_by('year', iris.analysis.MEAN)
tas_cubes[dataset] = cube
if data['project'] == 'OBS':
tas_obs.append(dataset)
# Get time-dependent psi data
psi_cubes = {}
psi_obs = []
for (dataset, [data]) in group_metadata(
io.netcdf_to_metadata(cfg, pattern='psi_*.nc'), 'dataset').items():
cube = iris.load_cube(data['filename'])
cube = cube.aggregated_by('year', iris.analysis.MEAN)
psi_cubes[dataset] = cube
if data['project'] == 'OBS':
psi_obs.append(dataset)
# Get psi, ECS and psi for models
(psi_cube, ecs_cube, lambda_cube) = get_external_cubes(cfg)
# Plots
for obs_name in tas_obs:
logger.info("Observation for tas: %s", obs_name)
plot_temperature_anomaly(cfg, tas_cubes, lambda_cube, obs_name)
for obs_name in psi_obs:
logger.info("Observation for psi: %s", obs_name)
plot_psi(cfg, psi_cubes, lambda_cube, obs_name)
obs_cube = psi_cubes[obs_name]
plot_emergent_relationship(cfg, psi_cube, ecs_cube, lambda_cube,
obs_cube)
plot_pdf(cfg, psi_cube, ecs_cube, obs_cube)
plot_cdf(cfg, psi_cube, ecs_cube, obs_cube)
# Print ECS range
ecs_range = get_ecs_range(cfg, psi_cube, ecs_cube, obs_cube)
logger.info("Observational constraint: Ψ = (%.2f ± %.2f) K",
np.mean(obs_cube.data), np.std(obs_cube.data))
logger.info(
"Constrained ECS range: (%.2f - %.2f) K with best "
"estimate %.2f K", ecs_range[1], ecs_range[2], ecs_range[0])
if __name__ == '__main__':
with run_diagnostic() as config:
main(config)
plt.close()
```
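An illustrative, standalone sketch of how the confidence range is read off the CDF, mirroring the logic of `get_ecs_range()` above; the Gaussian PDF and all numbers are made up, and only NumPy is required.
```python
import numpy as np
confidence_level = 0.66
ecs_lin = np.linspace(1.0, 6.0, 501)
dx = ecs_lin[1] - ecs_lin[0]
ecs_pdf = np.exp(-0.5 * ((ecs_lin - 3.0) / 0.5) ** 2)  # toy Gaussian PDF
ecs_pdf /= ecs_pdf.sum() * dx                          # normalise to unit area
ecs_cdf = np.cumsum(ecs_pdf) * dx                      # crude CDF
best_estimate = ecs_lin[np.argmax(ecs_pdf)]
inside = np.where((ecs_cdf >= (1.0 - confidence_level) / 2.0) &
                  (ecs_cdf <= (1.0 + confidence_level) / 2.0))[0]
print(best_estimate, ecs_lin[inside].min(), ecs_lin[inside].max())
```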
#### File: diag_scripts/shared/io.py
```python
import fnmatch
import logging
import os
import iris
import numpy as np
from .iris_helpers import unify_1d_cubes
logger = logging.getLogger(__name__)
VAR_KEYS = [
'long_name',
'units',
]
NECESSARY_KEYS = VAR_KEYS + [
'dataset',
'filename',
'project',
'short_name',
]
def _has_necessary_attributes(metadata,
only_var_attrs=False,
log_level='debug'):
"""Check if dataset metadata has necessary attributes."""
keys_to_check = (VAR_KEYS +
['short_name'] if only_var_attrs else NECESSARY_KEYS)
for dataset in metadata:
for key in keys_to_check:
if key not in dataset:
getattr(logger, log_level)("Dataset '%s' does not have "
"necessary attribute '%s'", dataset,
key)
return False
return True
def get_all_ancestor_files(cfg, pattern=None):
"""Return a list of all files in the ancestor directories.
Parameters
----------
cfg : dict
Diagnostic script configuration.
pattern : str, optional
Only return files which match a certain pattern.
Returns
-------
list of str
Full paths to the ancestor files.
"""
ancestor_files = []
input_dirs = [
d for d in cfg['input_files'] if not d.endswith('metadata.yml')
]
for input_dir in input_dirs:
for (root, _, files) in os.walk(input_dir):
if pattern is not None:
files = fnmatch.filter(files, pattern)
files = [os.path.join(root, f) for f in files]
ancestor_files.extend(files)
return ancestor_files
def get_ancestor_file(cfg, pattern):
"""Return a desired file in the ancestor directories.
Parameters
----------
cfg : dict
Diagnostic script configuration.
pattern : str
Pattern which specifies the name of the file.
Returns
-------
str or None
Full path to the file or `None` if file not found.
"""
files = get_all_ancestor_files(cfg, pattern=pattern)
if not files:
logger.warning(
"No file with requested name %s found in ancestor "
"directories", pattern)
return None
if len(files) != 1:
logger.warning(
"Multiple files with requested pattern %s found (%s), returning "
"first appearance", pattern, files)
return files[0]
def netcdf_to_metadata(cfg, pattern=None, root=None):
"""Convert attributes of netcdf files to list of metadata.
Parameters
----------
cfg : dict
Diagnostic script configuration.
pattern : str, optional
Only consider files which match a certain pattern.
root : str, optional (default: ancestor directories)
Root directory for the search.
Returns
-------
list of dict
List of dataset metadata.
"""
if root is None:
all_files = get_all_ancestor_files(cfg, pattern)
else:
all_files = []
for (base, _, files) in os.walk(root):
if pattern is not None:
files = fnmatch.filter(files, pattern)
files = [os.path.join(base, f) for f in files]
all_files.extend(files)
all_files = fnmatch.filter(all_files, '*.nc')
# Iterate over netcdf files
metadata = []
for path in all_files:
cube = iris.load_cube(path)
dataset_info = dict(cube.attributes)
for var_key in VAR_KEYS:
dataset_info[var_key] = getattr(cube, var_key)
dataset_info['short_name'] = cube.var_name
dataset_info['standard_name'] = cube.standard_name
dataset_info['filename'] = path
# Check if necessary keys are available
if _has_necessary_attributes([dataset_info], log_level='warning'):
metadata.append(dataset_info)
else:
logger.warning("Skipping '%s'", path)
return metadata
def metadata_to_netcdf(cube, metadata):
"""Convert single metadata dictionary to netcdf file.
Parameters
----------
cube : iris.cube.Cube
Cube to be written.
metadata : dict
Metadata for the cube.
"""
metadata = dict(metadata)
if not _has_necessary_attributes([metadata], log_level='warning'):
logger.warning("Cannot save cube\n%s", cube)
return
for var_key in VAR_KEYS:
setattr(cube, var_key, metadata.pop(var_key))
cube.var_name = metadata.pop('short_name')
cube.standard_name = None
if 'standard_name' in metadata:
standard_name = metadata.pop('standard_name')
try:
cube.standard_name = standard_name
except ValueError:
logger.debug("Got invalid standard_name '%s'", standard_name)
for (attr, val) in metadata.items():
if isinstance(val, bool):
metadata[attr] = str(val)
cube.attributes.update(metadata)
iris_save(cube, metadata['filename'])
def save_1d_data(cubes, path, coord_name, var_attrs, attributes=None):
"""Save 1D data for multiple datasets.
Create 2D cube with the dimensionsal coordinate `coord_name` and the
auxiliary coordinate `dataset` and save 1D data for every dataset given.
The cube is filled with missing values where no data exists for a dataset
at a certain point.
Note
----
Does not check metadata of the `cubes`, i.e. different names or units
will be ignored.
Parameters
----------
cubes : dict of iris.cube.Cube
1D `iris.cube.Cube`s (values) and corresponding datasets (keys).
path : str
Path to the new file.
coord_name : str
Name of the coordinate.
var_attrs : dict
Attributes for the variable (`short_name`, `long_name`, or `units`).
attributes : dict, optional
Additional attributes for the cube.
"""
var_attrs = dict(var_attrs)
if not cubes:
logger.warning("Cannot save 1D data, no cubes given")
return
if not _has_necessary_attributes(
[var_attrs], only_var_attrs=True, log_level='warning'):
logger.warning("Cannot write file '%s'", path)
return
datasets = list(cubes.keys())
cube_list = iris.cube.CubeList(list(cubes.values()))
cube_list = unify_1d_cubes(cube_list, coord_name)
data = [c.data for c in cube_list]
dataset_coord = iris.coords.AuxCoord(datasets, long_name='dataset')
coord = cube_list[0].coord(coord_name)
if attributes is None:
attributes = {}
var_attrs['var_name'] = var_attrs.pop('short_name')
# Create new cube
cube = iris.cube.Cube(np.ma.array(data),
aux_coords_and_dims=[(dataset_coord, 0), (coord, 1)],
attributes=attributes,
**var_attrs)
iris_save(cube, path)
def iris_save(source, path):
"""Save :mod:`iris` objects with correct attributes.
Parameters
----------
source : iris.cube.Cube or iterable of iris.cube.Cube
Cube(s) to be saved.
path : str
Path to the new file.
"""
if isinstance(source, iris.cube.Cube):
source.attributes['filename'] = path
else:
for cube in source:
cube.attributes['filename'] = path
iris.save(source, path)
logger.info("Wrote %s", path)
def save_scalar_data(data, path, var_attrs, aux_coord=None, attributes=None):
"""Save scalar data for multiple datasets.
Create 1D cube with the auxiliary dimension `dataset` and save scalar data
for every dataset given.
Note
----
Missing values can be added by `np.nan`.
Parameters
----------
data : dict
Scalar data (values) and corresponding datasets (keys).
path : str
Path to the new file.
var_attrs : dict
Attributes for the variable (`short_name`, `long_name` and `units`).
aux_coord : iris.coords.AuxCoord, optional
Optional auxiliary coordinate.
attributes : dict, optional
Additional attributes for the cube.
"""
var_attrs = dict(var_attrs)
if not data:
logger.warning("Cannot save scalar data, no data given")
return
if not _has_necessary_attributes(
[var_attrs], only_var_attrs=True, log_level='warning'):
logger.warning("Cannot write file '%s'", path)
return
dataset_coord = iris.coords.AuxCoord(list(data), long_name='dataset')
if attributes is None:
attributes = {}
var_attrs['var_name'] = var_attrs.pop('short_name')
coords = [(dataset_coord, 0)]
if aux_coord is not None:
coords.append((aux_coord, 0))
cube = iris.cube.Cube(np.ma.masked_invalid(list(data.values())),
aux_coords_and_dims=coords,
attributes=attributes,
**var_attrs)
iris_save(cube, path)
```
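A usage sketch for `save_scalar_data()`, assuming `iris` is installed and this module is importable as in ESMValTool; dataset names, values and the output path are placeholders.
```python
import numpy as np
data = {'MODEL-A': 3.1, 'MODEL-B': 2.7, 'MODEL-C': np.nan}   # nan = missing
var_attrs = {
    'short_name': 'ecs',
    'long_name': 'Effective Climate Sensitivity (ECS)',
    'units': 'K',
}
save_scalar_data(data, 'ecs.nc', var_attrs, attributes={'project': 'CMIP5'})
```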
#### File: diag_scripts/thermodyn_diagtool/fourier_coefficients.py
```python
import numpy as np
from netCDF4 import Dataset
GP_RES = np.array([16, 32, 48, 64, 96, 128, 256, 384, 512, 1024, 2048, 4096])
FC_RES = np.array([5, 10, 15, 21, 31, 43, 85, 127, 171, 341, 683, 1365])
G_0 = 9.81 # Gravity acceleration
GAM = 0.0065 # Standard atmosphere lapse rate
GAS_CON = 287.0 # Gas constant
P_0 = 10000 # Reference tropospheric pressure
def fourier_coeff(tadiagfile, outfile, ta_input, tas_input):
"""Compute Fourier coefficients in lon direction.
Receive as input:
- tadiagfile: the name of a file to store modified t fields;
- outfile: the name of a file to store the Fourier coefficients;
- ta_input: the name of a file containing t,u,v,w fields;
- tas_input: the name of a file containing t2m field.
"""
with Dataset(ta_input) as dataset:
lon = dataset.variables['lon'][:]
lat = dataset.variables['lat'][:]
lev = dataset.variables['plev'][:]
time = dataset.variables['time'][:]
t_a = dataset.variables['ta'][:, :, :, :]
u_a = dataset.variables['ua'][:, :, :, :]
v_a = dataset.variables['va'][:, :, :, :]
wap = dataset.variables['wap'][:, :, :, :]
nlon = len(lon)
nlat = len(lat)
nlev = len(lev)
ntime = len(time)
i = np.min(np.where(2 * nlat <= GP_RES))
trunc = FC_RES[i] + 1
wave2 = np.linspace(0, trunc - 1, trunc)
with Dataset(tas_input) as dataset:
tas = dataset.variables['tas'][:, :, :]
tas = tas[:, ::-1, :]
ta1_fx = np.array(t_a)
deltat = np.zeros([ntime, nlev, nlat, nlon])
p_s = np.full([ntime, nlat, nlon], P_0)
for i in np.arange(nlev - 1, 0, -1):
h_1 = np.ma.masked_where(ta1_fx[:, i, :, :] != 0, ta1_fx[:, i, :, :])
if np.any(h_1.mask > 0):
deltat[:, i - 1, :, :] = np.where(ta1_fx[:, i - 1, :, :] != 0,
deltat[:, i - 1, :, :],
(ta1_fx[:, i, :, :] - tas))
deltat[:, i - 1, :, :] = (
(1 * np.array(h_1.mask)) * np.array(deltat[:, i - 1, :, :]))
d_p = -(
(P_0 * G_0 / (GAM * GAS_CON)) * deltat[:, i - 1, :, :] / tas)
p_s = np.where(ta1_fx[:, i - 1, :, :] != 0, p_s, lev[i - 1] + d_p)
for k in np.arange(0, nlev - i - 1, 1):
h_3 = np.ma.masked_where(ta1_fx[:, i + k, :, :] != 0,
ta1_fx[:, i + k, :, :])
if np.any(h_3.mask > 0):
deltat[:, i - 1, :, :] = np.where(
ta1_fx[:, i + k, :, :] != 0, deltat[:, i - 1, :, :],
(ta1_fx[:, i + k + 1, :, :] - tas))
d_p = -((P_0 * G_0 /
(GAM * GAS_CON)) * deltat[:, i - 1, :, :] / tas)
p_s = np.where(ta1_fx[:, i + k, :, :] != 0, p_s,
lev[i + k] + d_p)
ta2_fx = np.array(t_a)
mask = np.zeros([nlev, ntime, nlat, nlon])
dat = np.zeros([nlev, ntime, nlat, nlon])
tafr_bar = np.zeros([nlev, ntime, nlat, nlon])
deltap = np.zeros([ntime, nlev, nlat, nlon])
for i in np.arange(nlev):
deltap[:, i, :, :] = p_s - lev[i]
h_2 = np.ma.masked_where(ta2_fx[:, i, :, :] == 0, ta2_fx[:, i, :, :])
mask[i, :, :, :] = np.array(h_2.mask)
tafr_bar[i, :, :, :] = (1 * np.array(mask[i, :, :, :]) * (
tas - GAM * GAS_CON / (G_0 * p_s) * deltap[:, i, :, :] * tas))
dat[i, :, :, :] = (
ta2_fx[:, i, :, :] * (1 - 1 * np.array(mask[i, :, :, :])))
t_a[:, i, :, :] = dat[i, :, :, :] + tafr_bar[i, :, :, :]
pr_output_diag(t_a, ta_input, tadiagfile, 'ta')
tafft_p = np.fft.fft(t_a, axis=3)[:, :, :, :int(trunc / 2)] / (nlon)
uafft_p = np.fft.fft(u_a, axis=3)[:, :, :, :int(trunc / 2)] / (nlon)
vafft_p = np.fft.fft(v_a, axis=3)[:, :, :, :int(trunc / 2)] / (nlon)
wapfft_p = np.fft.fft(wap, axis=3)[:, :, :, :int(trunc / 2)] / (nlon)
tafft = np.zeros([ntime, nlev, nlat, trunc])
uafft = np.zeros([ntime, nlev, nlat, trunc])
vafft = np.zeros([ntime, nlev, nlat, trunc])
wapfft = np.zeros([ntime, nlev, nlat, trunc])
tafft[:, :, :, 0::2] = np.real(tafft_p)
tafft[:, :, :, 1::2] = np.imag(tafft_p)
uafft[:, :, :, 0::2] = np.real(uafft_p)
uafft[:, :, :, 1::2] = np.imag(uafft_p)
vafft[:, :, :, 0::2] = np.real(vafft_p)
vafft[:, :, :, 1::2] = np.imag(vafft_p)
wapfft[:, :, :, 0::2] = np.real(wapfft_p)
wapfft[:, :, :, 1::2] = np.imag(wapfft_p)
dict_v = {'ta': tafft, 'ua': uafft, 'va': vafft, 'wap': wapfft}
file_desc = 'Fourier coefficients'
pr_output(dict_v, ta_input, outfile, file_desc, wave2)
def pr_output(dict_v, nc_f, fileo, file_desc, wave2):
"""Print outputs to NetCDF.
Save fields to NetCDF, retrieving information from an existing
NetCDF file. Metadata are transferred from the existing file to the
new one.
Arguments:
    - dict_v: a dictionary mapping the variable names (ta, ua, va, wap) to the
      Fourier coefficient arrays to be stored, with shape
      (time, plev, lat, wave);
    - nc_f: the existing dataset, from where the metadata are
      retrieved. Coordinates time, plev and lat have to be the same
      dimension as the fields to be saved to the new file;
    - fileo: the name of the output file;
    - file_desc: a description string stored in the output file;
    - wave2: an array containing the zonal wavenumbers;
PROGRAMMER(S)
<NAME> (2014), modified by <NAME> (2018).
"""
# Writing NetCDF files
with Dataset(fileo, 'w', format='NETCDF4') as var_nc_fid:
var_nc_fid.description = file_desc
with Dataset(nc_f, 'r') as nc_fid:
extr_time(nc_fid, var_nc_fid)
extr_lat(nc_fid, var_nc_fid, 'lat')
extr_plev(nc_fid, var_nc_fid)
# Write the wave dimension
var_nc_fid.createDimension('wave', len(wave2))
var_nc_fid.createVariable('wave', nc_fid.variables['plev'].dtype,
('wave', ))
var_nc_fid.variables['wave'][:] = wave2
for key in dict_v:
value = dict_v[key]
var1_nc_var = var_nc_fid.createVariable(
key, 'f8', ('time', 'plev', 'lat', 'wave'))
varatts(var1_nc_var, key)
var_nc_fid.variables[key][:, :, :, :] = value
def pr_output_diag(var1, nc_f, fileo, name1):
"""Print processed ta field to NetCDF file.
Save fields to NetCDF, retrieving information from an existing
NetCDF file. Metadata are transferred from the existing file to the
new one.
Arguments:
- var1: the field to be stored, with shape (time,level,lat,lon);
- nc_f: the existing dataset, from where the metadata are
retrieved. Coordinates time,level, lat and lon have to be the
same dimension as the fields to be saved to the new files;
- fileo: the name of the output file;
- name1: the name of the variable to be saved;
PROGRAMMER(S)
<NAME> (2014), modified by <NAME> (2018).
"""
with Dataset(fileo, 'w', format='NETCDF4') as var_nc_fid:
var_nc_fid.description = "Fourier coefficients"
with Dataset(nc_f, 'r') as nc_fid:
# Extract data from NetCDF file nad write them to the new file
extr_time(nc_fid, var_nc_fid)
extr_lat(nc_fid, var_nc_fid, 'lat')
extr_lon(nc_fid, var_nc_fid)
extr_plev(nc_fid, var_nc_fid)
var1_nc_var = var_nc_fid.createVariable(name1, 'f8',
('time', 'plev', 'lat', 'lon'))
varatts(var1_nc_var, name1)
        var_nc_fid.variables[name1][:, :, :, :] = var1
        # No explicit close needed: the enclosing 'with' block closes the file
def extr_lat(nc_fid, var_nc_fid, latn):
"""Extract lat coord. from NC files and save them to a new NC file.
Arguments:
- nc_f: the existing dataset, from where the metadata are
retrieved. Time,level and lon dimensions
are retrieved;
- var_nc_fid: the id of the new NC dataset previously created;
- latn: the name of the latitude dimension;
"""
# Extract coordinates from NetCDF file
lats = nc_fid.variables['lat'][:]
var_nc_fid.createDimension(latn, len(lats))
var_nc_dim = var_nc_fid.createVariable(latn, nc_fid.variables['lat'].dtype,
(latn, ))
for ncattr in nc_fid.variables['lat'].ncattrs():
var_nc_dim.setncattr(ncattr, nc_fid.variables['lat'].getncattr(ncattr))
var_nc_fid.variables[latn][:] = lats
def extr_lon(nc_fid, var_nc_fid):
    """Extract lon coord. from NC files and save them to a new NC file.
Arguments:
- nc_f: the existing dataset, from where the metadata are
retrieved. Time,level and lon dimensions
are retrieved;
- var_nc_fid: the id of the new NC dataset previously created;
"""
# Extract coordinates from NetCDF file
lons = nc_fid.variables['lon'][:]
var_nc_fid.createDimension('lon', len(lons))
var_nc_dim = var_nc_fid.createVariable(
'lon', nc_fid.variables['lon'].dtype, ('lon', ))
for ncattr in nc_fid.variables['lon'].ncattrs():
var_nc_dim.setncattr(ncattr, nc_fid.variables['lon'].getncattr(ncattr))
var_nc_fid.variables['lon'][:] = lons
def extr_plev(nc_fid, var_nc_fid):
"""Extract plev coord. from NC files and save them to a new NC file.
Arguments:
- nc_f: the existing dataset, from where the metadata are
retrieved. Time,level and lon dimensions
are retrieved;
- var_nc_fid: the id of the new NC dataset previously created;
"""
plev = nc_fid.variables['plev'][:]
var_nc_fid.createDimension('plev', len(plev))
var_nc_dim = var_nc_fid.createVariable(
'plev', nc_fid.variables['plev'].dtype, ('plev', ))
for ncattr in nc_fid.variables['plev'].ncattrs():
var_nc_dim.setncattr(ncattr,
nc_fid.variables['plev'].getncattr(ncattr))
var_nc_fid.variables['plev'][:] = plev
def extr_time(nc_fid, var_nc_fid):
"""Extract time coord. from NC files and save them to a new NC file.
Arguments:
- nc_f: the existing dataset, from where the metadata are
retrieved. Time,level and lon dimensions
are retrieved;
- var_nc_fid: the id of the new NC dataset previously created;
"""
# Extract coordinates from NetCDF file
time = nc_fid.variables['time'][:]
# Using our previous dimension info, we can create the new dimensions.
var_nc_fid.createDimension('time', len(time))
var_nc_dim = var_nc_fid.createVariable(
'time', nc_fid.variables['time'].dtype, ('time', ))
for ncattr in nc_fid.variables['time'].ncattrs():
var_nc_dim.setncattr(ncattr,
nc_fid.variables['time'].getncattr(ncattr))
var_nc_fid.variables['time'][:] = time
def varatts(w_nc_var, varname):
"""Add attibutes to the variables, depending on their name.
Arguments:
- w_nc_var: a variable object;
- varname: the name of the variable, among ta, ua, va and wap.
"""
if varname == 'ta':
w_nc_var.setncatts({
'long_name': "Air temperature",
'units': "K",
'level_desc': 'pressure levels'
})
elif varname == 'ua':
w_nc_var.setncatts({
'long_name': "Eastward wind",
'units': "m s-1",
'level_desc': 'pressure levels'
})
elif varname == 'va':
w_nc_var.setncatts({
'long_name': "Northward wind",
'units': "m s-1",
'level_desc': 'pressure levels'
})
elif varname == 'wap':
w_nc_var.setncatts({
'long_name': 'Lagrangian tendency of '
'air pressure',
'units': "Pa s-1",
'level_desc': 'pressure levels'
})
```
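A call sketch for `fourier_coeff()` with placeholder file names; the first two arguments are output files, the last two are the required inputs described in the docstring.
```python
fourier_coeff('ta_diagnosed.nc',    # output: modified ta field
              'fourier_coeffs.nc',  # output: Fourier coefficients
              'ta_input.nc',        # input: ta, ua, va, wap on (time, plev, lat, lon)
              'tas_input.nc')       # input: 2-m temperature
```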
|
{
"source": "jeromaerts/eWaterCycle_example_notebooks",
"score": 3
}
|
#### File: eWaterCycle_example_notebooks/utils/usgs_streamflow_download.py
```python
import os
import sys
import urllib.request as urllib2
import numpy as np
import pandas as pd
def download_usgs_data(
usgs_info_file,
outputfolder,
output_format,
startDT,
endDT,
parameterCd,
basin_start,
basin_end,
convert_unit_timestep=True
):
# More information: https://waterservices.usgs.gov/rest/IV-Test-Tool.html
# output_format e.g. ('json', 'rdb')
# startDT e.g. ('1980-01-01')
# endDT e.g.('2018-12-31')
# parameterCd e.g. ('00060') discharge, cubic feet per second
# 30208 m3/s
# excel_format : [USGS_ID, lat, lon]!
# Load USGS gauge ids
stations = pd.read_table(usgs_info_file, delimiter=";")
# Drop n number of rows, Remove after testing!
stations = stations.iloc[basin_start:basin_end]
# Create output folder
if not os.path.exists(outputfolder):
os.makedirs(outputfolder)
for index, station in stations.iterrows():
        # Gauge ids are read as numbers, which drops leading zeros; restore them
        usgsid = str(int(station["gauge_id"]))
        if len(usgsid) == 7:
            usgsid = "0" + usgsid
# Create download link
url = (
"https://waterservices.usgs.gov/nwis/iv/?format="
+ output_format
+ ",1.0&sites="
+ usgsid
+ "&startDT="
+ startDT
+ "&endDT="
+ endDT
+ "¶meterCd="
+ parameterCd
+ "&siteStatus=all"
)
out = outputfolder + "/" + usgsid + "." + output_format
urllib2.urlretrieve(url, out)
if convert_unit_timestep is True:
# Changes format table to [datetime, discharge]
df = pd.read_table(
out,
skiprows=55,
usecols=[2, 4],
header=None,
names=["datetime", "discharge"],
) # Read rdb table and set column headers
# Set Ice and Dis values to 0
df["discharge"] = df["discharge"].replace("Ice", 0)
df["discharge"] = df["discharge"].replace("Dis", 0)
# Get equipment malfunction values
indexvals = df.index[df["discharge"] == "Eqp"].tolist()
for i in indexvals:
# Overwrite with previous measurement
df["discharge"].loc[i] = df["discharge"].loc[i - 1]
# Convert to datetime and set index
df.index = pd.to_datetime(
df["datetime"], infer_datetime_format=True
)
# Drop obsolete column
df = df.drop(columns="datetime")
# Resample to hourly values
df = df.resample("H").mean()
# Print USGS ID for log
print(" USGSID")
print(usgsid)
# Set table id and coordinates
df["USGS_ID"] = usgsid
df["lat"] = station["gauge_lat"]
df["lon"] = station["gauge_lon"]
# Convert series to float
df["discharge"] = df["discharge"].apply(lambda x: float(x))
# Convert to cubic meters per second
df["discharge"] = df["discharge"].apply(lambda x: x / 35.315)
# Write daily UTC files
df.index = df.index.tz_localize("UTC")
df = df.resample("D").mean()
df["USGS_ID"] = usgsid
df.to_csv(outputfolder + "/" + usgsid + "_UTC_daily.csv")
log = print(usgsid + " Downloaded")
else:
exit()
return log
# Set system variables from bash
usgs_info_file = sys.argv[1]
outputfolder = sys.argv[2]
start_date = sys.argv[3]
end_date = sys.argv[4]
basin_start = int(sys.argv[5])
basin_end = int(sys.argv[6])
download_usgs_data(
usgs_info_file, outputfolder, "rdb", start_date, end_date, "00060", basin_start, basin_end
)
```
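The script is normally driven from the shell through the six positional arguments read above; an equivalent direct call, with placeholder paths and a `;`-separated station table containing `gauge_id`, `gauge_lat` and `gauge_lon` columns, would look like this:
```python
download_usgs_data(
    usgs_info_file='camels_stations.txt',  # placeholder station table
    outputfolder='usgs_discharge',
    output_format='rdb',
    startDT='1980-01-01',
    endDT='2018-12-31',
    parameterCd='00060',                   # discharge, cubic feet per second
    basin_start=0,
    basin_end=5,
)
```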
|
{
"source": "jeromaerts/flood_hazard_map_comparison_2019",
"score": 3
}
|
#### File: jeromaerts/flood_hazard_map_comparison_2019/MAI_calculation_example.py
```python
import os
import numpy as np
import time
import rasterio
# Set rastersum and haversine filenames
raster = "rastersum_RP100.tif"
haversine = "haversine_grid.tif"
def inundated_area(aggr_cat):
# Function calculates the inundated area using a haversine grid for each aggregate category
src_A = rasterio.open(raster)
profile = src_A.meta.copy()
src_B = rasterio.open(haversine)
inundated_area = np.zeros([1,73800])
i = 0
for ji, window in src_A.block_windows(1):
i += 1
affine = rasterio.windows.transform(window, src_A.transform)
height, width = rasterio.windows.shape(window)
bbox = rasterio.windows.bounds(window, src_A.transform)
profile.update({
'height': height,
'width': width,
'affine': affine})
array_A = src_A.read(window=window)
array_B = src_B.read(window=window)
if array_A.shape[0] == 1: #Reshape 3 dimensional array to 2 dimensional array
array_A = array_A.reshape(array_A.shape[1:]) #Reshape 3 dimensional array to 2 dimensional array
if array_B.shape[0] == 1: #Reshape 3 dimensional array to 2 dimensional array
array_B = array_B.reshape(array_B.shape[1:]) #Reshape 3 dimensional array to 2 dimensional array
array_A = np.where(array_A == aggr_cat,1,0) #extract binary values
# Calculate inundated area in km2 for each aggregate_category
array_combined = array_A * array_B
array_combined = np.sum(array_combined)
inundated_area = np.append(inundated_area, array_combined)
inundated_area = np.sum(inundated_area,0)
return inundated_area
# Calculate inundated area for each aggregate category
inun_1 = inundated_area(1)
inun_2 = inundated_area(2)
inun_3 = inundated_area(3)
inun_4 = inundated_area(4)
inun_5 = inundated_area(5)
inun_6 = inundated_area(6)
# Calculate total inundated area
area_total = inun_1+inun_2+inun_3+inun_4+inun_5+inun_6
a2 = (2/6)*inun_2
a3 = (3/6)*inun_3
a4 = (4/6)*inun_4
a5 = (5/6)*inun_5
a6 = (6/6)*inun_6
# Calculate MAI
MAI = (a2+a3+a4+a5+a6)/area_total
```
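A toy, self-contained illustration of the index computed above: with A_k the area inundated by exactly k of the six maps, MAI is the sum of (k/6)*A_k for k >= 2 divided by the total inundated area; the areas below are made up.
```python
areas_km2 = {1: 120.0, 2: 80.0, 3: 60.0, 4: 40.0, 5: 25.0, 6: 15.0}  # made up
total_area = sum(areas_km2.values())
mai = sum((k / 6) * a for k, a in areas_km2.items() if k >= 2) / total_area
print(round(mai, 3))
```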
|
{
"source": "jeromba6/transip_api_v6",
"score": 3
}
|
#### File: transip_api_v6/transipApiV6/Generic.py
```python
from OpenSSL import crypto
import base64
import requests
import random
import string
import json
def randomDigits(stringLength=10):
    """Generate a random string of digits."""
    return ''.join(random.choice(string.digits) for i in range(stringLength))
class Generic:
base_url='https://api.transip.nl/v6/auth'
def __init__(self, login, key, demo = False):
self.login = login
self.key = key
self.demo = demo
def get_jwt(self):
if self.demo:
return '<KEY>'
pkey = crypto.load_privatekey(crypto.FILETYPE_PEM, self.key)
data = '{ "login": "' + self.login + '", "nonce": ' + randomDigits(10) + ' }'
signature = base64.b64encode(crypto.sign(pkey, data.encode(), "sha512")).decode()
headers = {'Signature': signature, 'Accept': 'application/json'}
res = requests.post(self.base_url, headers=headers, data=data.encode())
if res.status_code != 201:
print('Could not create a JWT. Status_code was: ' + str(res.status_code))
print(res.text)
exit(1)
return json.loads(res.text)['token']
def get_headers(self):
return {'Authorization': 'Bearer ' + Generic.get_jwt(self), 'Accept': 'application/json'}
```
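A usage sketch, assuming a TransIP account name and the matching PEM private key generated in the control panel; `demo=True` would skip the signing step and return a canned token instead. The file name and account name are placeholders.
```python
with open('transip_private_key.pem') as fh:   # placeholder key file
    private_key = fh.read()
gen = Generic('myaccountname', private_key)   # placeholder account name
headers = gen.get_headers()                   # {'Authorization': 'Bearer <JWT>', ...}
```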
|
{
"source": "Jerome1434/ampscan",
"score": 3
}
|
#### File: ampscan/ampscan/core.py
```python
import numpy as np
import os
import struct
from ampscan.trim import trimMixin
from ampscan.smooth import smoothMixin
from ampscan.vis import visMixin
# The file path used in doc examples
filename = os.path.join(os.getcwd(), "tests", "stl_file.stl")
class AmpObject(trimMixin, smoothMixin, visMixin):
r"""
Base class for the ampscan project.
Stores mesh data and extra information
Inherits methods via mixins
Flexible class able to deal with surface data using 3 or 4 node faces and
visualise nodal data such as FEA outputs or shape deviations
Parameters
----------
data : str or dict
Data input as either a string to import from an external file or a
dictionary to pull values directly
stype : str, optional
descriptor of the type of data the AmpObject is representing, e.g
'limb' or 'socket'. Default is 'limb'
Returns
-------
AmpObject
Initiation of the object
Examples
-------
>>> amp = AmpObject(filename)
"""
def __init__(self, data=None, stype='limb', unify=True, struc=True):
self.stype = stype
self.createCMap()
if isinstance(data, str):
self.read_stl(data, unify, struc)
elif isinstance(data, dict):
for k, v in data.items():
setattr(self, k, v)
self.calcStruct()
elif isinstance(data, bytes):
self.read_bytes(data, unify, struc)
def read_stl(self, filename, unify=True, struc=True):
"""
Function to read .stl file from filename and import data into
the AmpObj
Parameters
-----------
filename: str
file path of the .stl file to read
unify: boolean, default True
unify the coincident vertices of each face
struc: boolean, default True
Calculate the underlying structure of the mesh, such as edges
"""
with open(filename, 'rb') as fh:
# Defined no of bytes for header and no of faces
HEADER_SIZE = 80
COUNT_SIZE = 4
# State the data type and length in bytes of the normals and vertices
data_type = np.dtype([('normals', np.float32, (3, )),
('vertices', np.float32, (9, )),
('atttr', '<i2', (1, ))])
# Read the header of the STL
head = fh.read(HEADER_SIZE).lower()
# Read the number of faces
NFaces, = struct.unpack('@i', fh.read(COUNT_SIZE))
# Read the remaining data and save as void, then close file
data = np.fromfile(fh, data_type)
# Test if the file is ascii
if str(head[:5], 'utf-8') == 'solid':
raise ValueError("ASCII files not supported")
# Write the data to a numpy arrays in AmpObj
            # Check the declared face count matches the data actually read
            tfcond = NFaces == data['vertices'].shape[0]
            if not tfcond:
                raise ValueError("File is corrupt")
vert = np.resize(np.array(data['vertices']), (NFaces*3, 3))
norm = np.array(data['normals'])
faces = np.reshape(range(NFaces*3), [NFaces,3])
self.faces = faces
self.vert = vert
self.norm = norm
# Call function to unify vertices of the array
if unify is True:
self.unifyVert()
# Call function to calculate the edges array
# self.fixNorm()
if struc is True:
self.calcStruct()
self.values = np.zeros([len(self.vert)])
def read_bytes(self, data, unify=True, struc=True):
"""
        Function to read binary .stl data from a bytes object and import it
        into the AmpObj
        Parameters
        -----------
        data: bytes
            the binary .stl data to parse
unify: boolean, default True
unify the coincident vertices of each face
struc: boolean, default True
Calculate the underlying structure of the mesh, such as edges
"""
# Defined no of bytes for header and no of faces
HEADER_SIZE = 80
COUNT_SIZE = 4
# State the data type and length in bytes of the normals and vertices
data_type = np.dtype([('normals', np.float32, (3, )),
('vertices', np.float32, (9, )),
('atttr', '<i2', (1, ))])
# Read the header of the STL
head = data[:HEADER_SIZE].lower()
# Read the number of faces
NFaces, = struct.unpack('@i', data[HEADER_SIZE:HEADER_SIZE+COUNT_SIZE])
# Read the remaining data and save as void, then close file
data = np.frombuffer(data[COUNT_SIZE+HEADER_SIZE:], data_type)
# Test if the file is ascii
if str(head[:5], 'utf-8') == 'solid':
raise ValueError("ASCII files not supported")
# Write the data to a numpy arrays in AmpObj
        # Check the declared face count matches the data actually read
        tfcond = NFaces == data['vertices'].shape[0]
        if not tfcond:
            raise ValueError("File is corrupt")
vert = np.resize(np.array(data['vertices']), (NFaces*3, 3))
norm = np.array(data['normals'])
faces = np.reshape(range(NFaces*3), [NFaces,3])
self.faces = faces
self.vert = vert
self.norm = norm
# Call function to unify vertices of the array
if unify is True:
self.unifyVert()
# Call function to calculate the edges array
# self.fixNorm()
if struc is True:
self.calcStruct()
self.values = np.zeros([len(self.vert)])
def calcStruct(self, norm=True, edges=True,
edgeFaces=True, faceEdges=True, vNorm=False):
r"""
Top level function to calculate the underlying structure of the
AmpObject
Parameters
----------
norm: boolean, default True
If true, the normals of each face in the mesh will be calculated
edges: boolean, default True
If true, the edges of the mesh will be calculated, the refers to
the vertex index that make up any edge
edgeFaces: boolean, default True
If true, the edgeFaces array of the mesh will be calculated, this
refers to the index of the three edges that make up each face
faceEdges: boolean, default True
If true, the faceEdges array will be calculated, this refers to
index of the faces that are coincident to each edge. Normally,
there are two faces per edge, if there is only one, then -99999
will be used to indicate this
vNorm: boolean, default False
If true, the normals of each vertex in the mesh will be calculated
"""
if norm is True:
self.calcNorm()
if edges is True:
self.calcEdges()
if edgeFaces is True:
self.calcEdgeFaces()
if faceEdges is True:
self.calcFaceEdges()
if vNorm is True:
self.calcVNorm()
def unifyVert(self):
r"""
Function to unify coincident vertices of the mesh to reduce
size of the vertices array enabling speed increases when performing
calculations using the vertex array
Examples
--------
>>> amp = AmpObject(filename, unify=False)
>>> amp.vert.shape
(44832, 3)
>>> amp.unifyVert()
>>> amp.vert.shape
(7530, 3)
"""
# Requires numpy 1.13
self.vert, indC = np.unique(self.vert, return_inverse=True, axis=0)
# Maps the new vertices index to the face array
self.faces = np.resize(indC[self.faces],
(len(self.norm), 3)).astype(np.int32)
def calcEdges(self):
"""
        Function to compute the edges array, i.e. the indices of the two vertices
that make up each edge
Returns
-------
edges: ndarray
            Denoting the indices of the two vertices on each edge
"""
# Get edges array
self.edges = np.reshape(self.faces[:, [0, 1, 0, 2, 1, 2]], [-1, 2])
self.edges = np.sort(self.edges, 1)
# Unify the edges
self.edges, indC = np.unique(self.edges, return_inverse=True, axis=0)
def calcEdgeFaces(self):
r"""
        Function that calculates the indices of the three edges that make up
each face
Returns
-------
edgesFace: ndarray
            Denoting the indices of the three edges on each face
"""
edges = np.reshape(self.faces[:, [0, 1, 0, 2, 1, 2]], [-1, 2])
edges = np.sort(edges, 1)
# Unify the edges
edges, indC = np.unique(edges, return_inverse=True, axis=0)
# Get edges on each face
self.edgesFace = np.reshape(range(len(self.faces)*3), [-1,3])
#Remap the edgesFace array
self.edgesFace = indC[self.edgesFace].astype(np.int32)
def calcFaceEdges(self):
r"""
        Function that calculates the indices of the faces on each edge
Returns
-------
faceEdges: ndarray
            The indices of the faces on each edge; an edge may have either
            1 or 2 faces. If it has only 1, the second index will be -99999
"""
#Initiate the faceEdges array
self.faceEdges = np.empty([len(self.edges), 2], dtype=np.int32)
self.faceEdges.fill(-99999)
# Denote the face index for flattened edge array
fInd = np.repeat(np.array(range(len(self.faces))), 3)
# Flatten edge array
eF = np.reshape(self.edgesFace, [-1])
eFInd = np.unique(eF, return_index=True)[1]
logic = np.zeros([len(eF)], dtype=bool)
logic[eFInd] = True
self.faceEdges[eF[logic], 0] = fInd[logic]
self.faceEdges[eF[~logic], 1] = fInd[~logic]
def calcNorm(self):
r"""
Calculate the normal of each face of the AmpObj
Returns
-------
norm: ndarray
normal of each face
"""
norms = np.cross(self.vert[self.faces[:,1]] -
self.vert[self.faces[:,0]],
self.vert[self.faces[:,2]] -
self.vert[self.faces[:,0]])
mag = np.linalg.norm(norms, axis=1)
self.norm = np.divide(norms, mag[:,None])
def fixNorm(self):
r"""
Fix normals of faces so they all face outwards
"""
fC = self.vert[self.faces].mean(axis=1)
cent = self.vert.mean(axis=0)
# polarity = np.sum(self.norm * (fC-cent), axis=1) < 0
# if polarity.mean() > 0.5:
# self.faces[:, [1,2]] = self.faces[:, [2,1]]
# self.calcNorm()
# if hasattr(self, 'vNorm'): self.calcVNorm()
polarity = np.einsum('ij, ij->i', fC - cent, self.norm) < 0
# self.faces[polarity, [1,2]] = self.faces[polarity, [2,1]]
        for i, f in enumerate(self.faces):
            if polarity[i]:
                self.faces[i, :] = [f[0], f[2], f[1]]
self.calcNorm()
if hasattr(self, 'vNorm'): self.calcVNorm()
def calcVNorm(self):
"""
Function to compute the vertex normals based upon the mean of the
connected face normals
Returns
-------
vNorm: ndarray
normal of each vertex
"""
f = self.faces.flatten()
o_idx = f.argsort()
row, col = np.unravel_index(o_idx, self.faces.shape)
ndx = np.searchsorted(f[o_idx], range(self.vert.shape[0]), side='right')
ndx = np.r_[0, ndx]
norms = self.norm[row, :]
self.vNorm = np.zeros(self.vert.shape)
for i in range(self.vert.shape[0]):
self.vNorm[i, :] = np.nanmean(norms[ndx[i]:ndx[i+1], :], axis=0)
def save(self, filename):
r"""
Function to save the AmpObj as a binary .stl file
Parameters
-----------
filename: str
file path of the .stl file to save to
"""
self.calcNorm()
fv = self.vert[np.reshape(self.faces, len(self.faces)*3)]
with open(filename, 'wb') as fh:
header = '%s' % (filename)
header = header.split('/')[-1].encode('utf-8')
header = header[:80].ljust(80, b' ')
packed = struct.pack('@i', len(self.faces))
fh.write(header)
fh.write(packed)
data_type = np.dtype([('normals', np.float32, (3, )),
('vertices', np.float32, (9, )),
('atttr', '<i2', (1, ))])
data_write = np.zeros(len(self.faces), dtype=data_type)
data_write['normals'] = self.norm
data_write['vertices'] = np.reshape(fv, (len(self.faces), 9))
data_write.tofile(fh)
def translate(self, trans):
r"""
Translate the AmpObj in 3D space
Parameters
-----------
trans: array_like
Translation in [x, y, z]
"""
# Check that trans is array like
if isinstance(trans, (list, np.ndarray, tuple)):
# Check that trans has exactly 3 dimensions
if len(trans) == 3:
self.vert[:] += trans
else:
raise ValueError("Translation has incorrect dimensions. Expected 3 but found: " + str(len(trans)))
else:
raise TypeError("Translation is not array_like: " + trans)
def centre(self):
r"""
Centre the AmpObject based upon the mean of all the vertices
"""
self.translate(-self.vert.mean(axis=0))
def centreStatic(self, static):
r"""
Centre this AmpObject on the static AmpObject's centroid based upon the mean of all the vertices
Parameters
----------
static : AmpObject
The static shape to center this object onto
"""
if isinstance(static, AmpObject):
self.translate(-self.vert.mean(axis=0)+static.vert.mean(axis=0))
else:
raise TypeError("centre_static method expects AmpObject, found: {}".format(type(static)))
def rotateAng(self, rot, ang='rad', norms=True):
r"""
Rotate the AmpObj in 3D space according to three angles
Parameters
-----------
rot: array_like
Rotation around [x, y, z]
ang: str, default 'rad'
Specify if the euler angles are in degrees or radians.
Default is radians
Examples
--------
>>> amp = AmpObject(filename)
>>> ang = [np.pi/2, -np.pi/4, np.pi/3]
>>> amp.rotateAng(ang, ang='rad')
"""
# Check that ang is valid
if ang not in ('rad', 'deg'):
raise ValueError("Ang expected 'rad' or 'deg' but {} was found".format(ang))
if isinstance(rot, (tuple, list, np.ndarray)):
R = self.rotMatrix(rot, ang)
self.rotate(R, norms)
else:
raise TypeError("rotateAng requires a list")
def rotate(self, R, norms=True):
r"""
Rotate the AmpObject using a rotation matrix
Parameters
----------
R: array_like
A 3x3 array specifying the rotation matrix
norms: boolean, default True
"""
if isinstance(R, (list, tuple)):
# Make R a np array if its a list or tuple
            R = np.array(R, dtype=float)
        elif not isinstance(R, np.ndarray):
            # R is neither list/tuple nor ndarray, so it cannot be used as a rotation matrix
raise TypeError("Expected R to be array-like but found: " + str(type(R)))
if len(R) != 3 or len(R[0]) != 3:
# Incorrect dimensions
if isinstance(R, np.ndarray):
raise ValueError("Expected 3x3 array, but found: {}".format(R.shape))
else:
raise ValueError("Expected 3x3 array, but found: 3x"+str(len(R)))
self.vert[:, :] = np.dot(self.vert, R.T)
if norms is True:
self.norm[:, :] = np.dot(self.norm, R.T)
if hasattr(self, 'vNorm'):
self.vNorm[:, :] = np.dot(self.vNorm, R.T)
def rigidTransform(self, R=None, T=None):
r"""
Perform a rigid transformation on the AmpObject, first the rotation,
then the translation
Parameters
----------
R: array_like, default None
A 3x3 array specifying the rotation matrix
        T: array_like, default None
An array of the form [x, y, z] which specifies the translation
"""
if R is not None:
if isinstance(R, (tuple, list, np.ndarray)):
self.rotate(R, True)
else:
raise TypeError("Expecting array-like rotation, but found: "+type(R))
if T is not None:
if isinstance(T, (tuple, list, np.ndarray)):
self.translate(T)
else:
raise TypeError("Expecting array-like translation, but found: "+type(T))
@staticmethod
def rotMatrix(rot, ang='rad'):
r"""
Calculate the rotation matrix from three angles, the order is assumed
as around the x, then y, then z axis
Parameters
----------
rot: array_like
Rotation around [x, y, z]
ang: str, default 'rad'
Specify if the Euler angles are in degrees or radians
Returns
-------
R: array_like
The calculated 3x3 rotation matrix
"""
# Check that rot is valid
if not isinstance(rot, (tuple, list, np.ndarray)):
raise TypeError("Expecting array-like rotation, but found: "+type(rot))
elif len(rot) != 3:
raise ValueError("Expecting 3 arguments but found: {}".format(len(rot)))
# Check that ang is valid
if ang not in ('rad', 'deg'):
raise ValueError("Ang expected 'rad' or 'deg' but {} was found".format(ang))
if ang == 'deg':
rot = np.deg2rad(rot)
[angx, angy, angz] = rot
Rx = np.array([[1, 0, 0],
[0, np.cos(angx), -np.sin(angx)],
[0, np.sin(angx), np.cos(angx)]])
Ry = np.array([[np.cos(angy), 0, np.sin(angy)],
[0, 1, 0],
[-np.sin(angy), 0, np.cos(angy)]])
Rz = np.array([[np.cos(angz), -np.sin(angz), 0],
[np.sin(angz), np.cos(angz), 0],
[0, 0, 1]])
R = np.dot(np.dot(Rz, Ry), Rx)
return R
def flip(self, axis=1):
r"""
Flip the mesh in a plane
Parameters
----------
axis: int, default 1
The axis in which to flip the mesh
"""
if isinstance(axis, int):
if 0 <= axis < 3: # Check axis is between 0-2
self.vert[:, axis] *= -1.0
# Switch face order to normals face same direction
self.faces[:, [1, 2]] = self.faces[:, [2, 1]]
self.calcNorm()
self.calcVNorm()
else:
raise ValueError("Expected axis to be within range 0-2 but found: {}".format(axis))
else:
raise TypeError("Expected axis to be int, but found: {}".format(type(axis)))
```
#### File: ampscan/ampscan/smooth.py
```python
import numpy as np
import copy
class smoothMixin(object):
def lp_smooth(self, n=1, brim = True):
r"""
Function to apply a Laplacian smooth to the mesh. This method replaces
each vertex with the mean of its connected neighbours
Parameters
----------
n: int, default 1
number of iterations of smoothing
"""
if brim is True:
eidx = (self.faceEdges == -99999).sum(axis=1).astype(bool)
vBrim = np.unique(self.edges[eidx, :])
else: vBrim = []
# Flatten the edges array to 1D
e = self.edges.flatten()
        # Get the indices to sort edges
o_idx = e.argsort()
        # Get indices of the sorted array where the last of each vertex index
# occurs
ndx = np.searchsorted(e[o_idx], np.arange(len(self.vert)),
side='right')
ndx = np.r_[0, ndx]
        # Map indices between the flattened edges array and the 2-D edges array
row, col = np.unravel_index(o_idx, self.edges.shape)
for i in np.arange(n):
# List all vertices
vert = copy.deepcopy(self.vert)
neighVerts = vert[self.edges[row, 1-col], :]
vRange = np.arange(self.vert.shape[0])
log = np.isin(vRange, vBrim)
vRange = vRange[~log]
for j in vRange:
# Calculate the mean of the vertex set
self.vert[j, :] = neighVerts[ndx[j]:ndx[j+1]].mean(axis=0)
self.calcNorm()
self.calcVNorm()
def hc_smooth(self, n=1 ,beta=0.6, brim=True):
r"""
Function to apply a Humphrey’s Classes smooth to the mesh. Note, this assumes
that alpha=0 (ie the original point through the iteration has no effect).
If beta=1, then this effectively acts as the Laplacian smooth
Parameters
----------
n: int, default 1
number of iterations of smoothing
beta: float, default 0.6
scalar between [0, 1] which dictates influence of distance from adjacent to original point.
If beta=1, then this effectively acts as the Laplacian smooth
brim: bool, default True
If true, then this will not smooth the vertices on the brim
"""
if brim is True:
eidx = (self.faceEdges == -99999).sum(axis=1).astype(bool)
vBrim = np.unique(self.edges[eidx, :])
else: vBrim = []
# Flatten the edges array to 1D
e = self.edges.flatten()
        # Get the indices to sort edges
o_idx = e.argsort()
        # Get indices of the sorted array where the last of each vertex index
# occurs
ndx = np.searchsorted(e[o_idx], np.arange(len(self.vert)),
side='right')
ndx = np.r_[0, ndx]
        # Map indices between the flattened edges array and the 2-D edges array
row, col = np.unravel_index(o_idx, self.edges.shape)
for i in np.arange(n):
# List all vertices
vert = copy.deepcopy(self.vert)
neighVerts = vert[self.edges[row, 1-col], :]
vRange = np.arange(self.vert.shape[0])
log = np.isin(vRange, vBrim)
vRange = vRange[~log]
for j in vRange:
# Get the adjacent vertices
adj = neighVerts[ndx[j]:ndx[j+1]]
# Get the original vertex
q = self.vert[j, :]
# calculate new Laplacian location
p = adj.mean(axis=0)
# Distance between Laplacian and original
b = p - q
# Mean distance adjacent between original
d = (adj - q).mean(axis=0)
# Based upon beta, get the updated location
self.vert[j, :] = q + beta*b - (1-beta)*d
self.calcNorm()
self.calcVNorm()
def smoothValues(self, n=1):
"""
Function to apply a simple Laplacian smooth to the values array.
Identical to the vertex smoothing except it applies the smoothing
to the values
Parameters
----------
n: int, default 1
number of iterations of smoothing
"""
# Flatten the edges array to 1D
e = self.edges.flatten()
        # Get the indices to sort edges
o_idx = e.argsort()
        # Get indices of the sorted array where the last of each vertex index
# occurs
ndx = np.searchsorted(e[o_idx], np.arange(len(self.values)),
side='right')
ndx = np.r_[0, ndx]
        # Map indices between the flattened edges array and the 2-D edges array
row, col = np.unravel_index(o_idx, self.edges.shape)
for i in np.arange(n):
neighValues = self.values[self.edges[row, 1-col]]
for j in np.arange(self.values.shape[0]):
# Calculate mean of values set
self.values[j] = neighValues[ndx[j]:ndx[j+1]].mean()
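# Hedged usage sketch (not part of the original module): assumes an AmpObject
# `amp` whose structure has already been computed with calcStruct().
# amp.lp_smooth(n=3)             # three Laplacian passes, brim vertices fixed
# amp.hc_smooth(n=3, beta=0.6)   # Humphrey's Classes variant of the smooth
# amp.smoothValues(n=2)          # smooth the per-vertex values array instead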
```
#### File: ampscan/ampscan/trim.py
```python
import numpy as np
from numbers import Number
import os
from scipy import spatial
import copy
# Used by doc tests
filename = os.path.join(os.getcwd(), "tests", "stl_file.stl")
class trimMixin(object):
r"""
Methods for trimming the AmpObject mesh
"""
def planarTrim(self, height, plane = 2, above = True):
r"""
Trim the vertices using a flat plane, all vertices above plane will be
trimmed
Parameters
-----------
height: float
Trim height, values above this will be deleted
plane: int, default 2
plane for slicing
Examples
--------
>>> from ampscan import AmpObject
>>> amp = AmpObject(filename)
>>> amp.planarTrim(100, 2)
"""
if isinstance(height, Number) and isinstance(plane, int):
# planar values for each vert on face
fv = self.vert[self.faces, plane]
# Number points on each face are above cut plane
fvlogic = (fv > height).sum(axis=1)
# Faces with points both above and below cut plane
adjf = self.faces[np.logical_or(fvlogic == 2, fvlogic == 1)]
# Get adjacent vertices
adjv = np.unique(adjf)
# Get vert above height and set to height
abvInd = adjv[self.vert[adjv, plane] > height]
self.vert[abvInd, plane] = height
# Find all verts above plane
delv = self.vert[:, plane] > height
# Reorder verts to account for deleted one
vInd = np.cumsum(~delv) - 1
self.faces = self.faces[fvlogic != 3, :]
self.faces = vInd[self.faces]
self.vert = self.vert[~delv, :]
self.values = self.values[~delv]
self.calcStruct()
else:
raise TypeError("height arg must be a float")
def threePointTrim(self, p0, p1, p2, above = True):
r"""
Trim the vertices using a plane defined by three points. By default, all points
above the plane are deleted.
Parameters
-----------
p0: array_like
The co-ordinates of the first point to define the plane
p1: array_like
The co-ordinates of the second point to define the plane
p2: array_like
The co-ordinates of the third point to define the plane
Examples
--------
>>> from ampscan import AmpObject
>>> amp = AmpObject(filename)
>>> p0 = [50, 50, 0]
>>> p1 = [50, -50, -40]
>>> p2 = [-50, 50, 10]
>>> amp.threePointTrim(p0, p1, p2)
"""
# Ensure asarrays
p0 = np.asarray(p0)
p1 = np.asarray(p1)
p2 = np.asarray(p2)
# Calculate plane
v0 = p1 - p0
v1 = p2 - p0
c = np.cross(v0, v1)
c = c/np.linalg.norm(c)
k = -np.multiply(c, p0).sum()
        # Height of the trim plane evaluated at each vertex's (x, y) position
height = -(self.vert[:, 0]*c[0] + self.vert[:, 1]*c[1] + k)/c[2]
# Number points on each face are above cut plane
fv = self.vert[self.faces, 2]
fvHeight = height[self.faces]
fvlogic = (fv > fvHeight).sum(axis=1)
# Faces with points both above and below cut plane
adjf = self.faces[np.logical_or(fvlogic == 2, fvlogic == 1)]
# Get adjacent vertices
adjv = np.unique(adjf)
# Get vert above height and set to height
abvInd = adjv[self.vert[adjv, 2] > height[adjv]]
self.vert[abvInd, 2] = height[abvInd]
# Find all verts above plane
delv = self.vert[:, 2] > height
# Reorder verts to account for deleted one
vInd = np.cumsum(~delv) - 1
self.faces = self.faces[fvlogic != 3, :]
self.faces = vInd[self.faces]
self.vert = self.vert[~delv, :]
self.values = self.values[~delv]
self.calcStruct()
def dynamicTrim(self, s, maxdist = 20):
"""
This function trims vertices and faces from the AmpObject. It calculates
the distance between the AmpObject mesh centroids and their nearest neighbour
on the s mesh. If this distance is more than maxdist, the face is
removed, and subsequently the vertices no longer connected to a face.
Parameters
----------
s : AmpObject
The target object
maxdist : float
            The threshold distance. Faces on this mesh whose centroid is
            further than maxdist from its nearest neighbour on the s mesh
            will be removed, as will the vertices no longer connected to a
            face afterwards.
"""
kdTree = spatial.cKDTree(s.vert)
fC = self.vert[self.faces].mean(axis=1)
[dist, idx] = kdTree.query(fC,1)
# faceid = np.arange(len(dist))[dist < maxdist]
# Find the faces with a centroid outside maxdist
self.faces = self.faces[dist <= maxdist, :]
# Index any vertices to keep
keepV = np.zeros([self.vert.shape[0]], dtype = bool)
keepV[np.unique(self.faces)] = 1
vInd = np.cumsum(keepV) - 1
# Set the vertices and faces
self.faces = vInd[self.faces]
self.vert = self.vert[keepV, :]
self.calcStruct()
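# Hedged usage sketch (not part of the original module); `filename` is the
# example path defined at the top of this file.
# from ampscan import AmpObject
# amp = AmpObject(filename)
# amp.planarTrim(100, plane=2)   # remove everything above z = 100
# amp.threePointTrim([50, 50, 0], [50, -50, -40], [-50, 50, 10])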
```
#### File: ampscan/tests/test_trim.py
```python
import unittest
from util import get_path
import numpy as np
class TestTrim(unittest.TestCase):
def setUp(self):
"""Runs before each unit test
Sets up the AmpObject object using "stl_file.stl"
"""
from ampscan.core import AmpObject
stl_path = get_path("stl_file.stl")
self.amp = AmpObject(stl_path)
stl_path = get_path("stl_file_4.stl") # R=1.2
self.amp2 = AmpObject(stl_path)
def test_trim(self):
"""Tests the trim method of AmpObject for TypeErrors"""
# Testing that the method runs
self.amp.planarTrim(0.6, plane=2)
# Testing invalid data types raise TypeErrors
with self.assertRaises(TypeError):
self.amp.planarTrim(0.6, plane=[])
with self.assertRaises(TypeError):
self.amp.planarTrim(0.6, plane=0.9)
with self.assertRaises(TypeError):
self.amp.planarTrim([], plane=[])
def test_trim_2(self):
"""Tests the trim method of AmpObject by checking no vertices are above trim line"""
# Test no points are above 10
h = 10
self.amp.planarTrim(h, plane=2)
self.assertLessEqual(self.amp.vert[:, 2].max(), h)
# Test no points are above 0
h = 0
self.amp.planarTrim(h, plane=2)
self.assertLessEqual(self.amp.vert[:, 2].max(), h)
def test_trim_3(self):
"""Tests the trim method of AmpObject by checking no vertices are above trim line"""
        # Check no vertices lie above the plane defined by the three points
p0 = np.array([50, 50, 0])
p1 = np.array([50, -50, -40])
p2 = np.array([-50, 50, 10])
v0 = p1 - p0
v1 = p2 - p0
c = np.cross(v0, v1)
c = c/np.linalg.norm(c)
k = -np.multiply(c, p0).sum()
# planar values for each vert on face
self.amp.threePointTrim(p0, p1, p2)
        height = -(self.amp.vert[:, 0]*c[0] + self.amp.vert[:, 1]*c[1] + k)/c[2]
self.assertLessEqual(self.amp.vert[:, 2].max(), height.max())
    def test_trim_4(self):
        """Tests the dynamicTrim method of AmpObject by checking that vertices are removed"""
        # Record the vertex count before trimming against the second mesh
        v = self.amp.vert.shape[0]
self.amp.dynamicTrim(self.amp2, 100)
self.assertLess(self.amp.vert.shape[0], v)
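# Hedged addition (not in the original test module): allow running this file
# directly with the standard unittest runner.
if __name__ == '__main__':
    unittest.main()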
```
|
{
"source": "jerome89/graphene",
"score": 3
}
|
#### File: v1.9.1/es-index-migration/es-index-migration.py
```python
import sys
from getpass import getpass
try:
from elasticsearch import Elasticsearch, helpers
except ImportError:
raise ImportError("Please install python elasticsearch dependency.")
def migrate(es, tag_index, fetch_size, bulk_size):
print("Migrate documents in index: " + tag_index)
resp = es.search(
index=tag_index,
body={},
size=fetch_size,
scroll='60s',
request_timeout=30
)
if resp['hits']['total'] == 0:
print("SKIP! No documents to migrate.")
return
print("Migration will be done on total " + str(resp['hits']['total']) + " document(s) ...")
print("Fetching all documents from index: " + tag_index)
scroll_id = resp['_scroll_id']
docs_to_migrate = []
docs_to_migrate.extend(resp['hits']['hits'])
while len(resp['hits']['hits']):
resp = es.scroll(
scroll_id=scroll_id,
scroll='60s',
request_timeout=30
)
print("Fetched total " + str(len(docs_to_migrate)) + " document(s).")
docs_to_migrate.extend(resp['hits']['hits'])
migrate_docs(es, docs_to_migrate, bulk_size)
print("Finished migration in index: " + tag_index)
def migrate_docs(es, docs, bulk_size):
if len(docs) == 0:
return
bulk = []
total = len(docs)
count = 0
for doc in docs:
if count > 0 and count % bulk_size == 0:
print("Bulk updating ... (" + str(count) + "/" + str(total) + ")")
helpers.bulk(es, bulk)
bulk = []
if '_id' not in doc.keys() or '_type' not in doc.keys() or '_index' not in doc.keys():
print("Wrong doc!")
continue
doc_id = doc['_id']
doc_type = doc['_type']
doc_index = doc['_index']
tag_list = []
for tag in doc['_source'].keys():
if tag not in tag_list and not tag.startswith('@'):
tag_list.append(tag)
update_doc = {'_op_type': 'update', 'doc': {}, '_index': doc_index, '_id': doc_id, '_type': doc_type}
update_doc['doc']['@tags'] = tag_list
bulk.append(update_doc)
count = count + 1
if len(bulk) > 0:
print("Bulk updating remaining " + str(len(bulk)) + " document(s) ...")
helpers.bulk(es, bulk)
print("Finished migration for " + str(count) + " among total " + str(total) + " document(s)!")
if len(sys.argv) < 3:
raise Exception("Please provide elasticsearch host and port.")
es_host = str(sys.argv[1])
es_port = int(sys.argv[2])
es_username = str(input("Please enter the Elasticsearch username (leave empty for no authentication): "))
if len(es_username.strip()) != 0:
es_password = str(getpass("Please enter the Elasticsearch password: "))
if len(es_username.strip()) > 0:
es = Elasticsearch(
[es_host],
port=es_port,
http_auth=(es_username, es_password)
)
else:
es = Elasticsearch(
[es_host],
port=es_port
)
bulk_size = 10000
fetch_size = 10000
tag_indices = list(es.indices.get_alias("tag*").keys())
for tag_index in tag_indices:
migrate(es, tag_index, fetch_size, bulk_size)
print("Migration Complete!")
```
|
{
"source": "jerome-auguste/Movie-Selector",
"score": 3
}
|
#### File: jerome-auguste/Movie-Selector/sparql_queries.py
```python
from SPARQLWrapper import JSON
from utils import get_sparql, get_prefix, search, resp_format # , pprint
def get_movie(title: str = None,
director: str = None,
actor: str = None,
genre: str = None,
score: int = 0) -> list:
"""Result of queries movies matching some research criteria
(film title, director name, actor name, genre and/or minimum score)
Args:
title (str, optional): Searched title. Defaults to None.
director (str, optional): Searched director. Defaults to None.
actor (str, optional): Searched actor. Defaults to None.
genre (str, optional): Searched genre. Defaults to None.
score (int, optional): Minimum score. Defaults to 0.
Returns:
list: result of the queries
(list of movies with id, title, director's name,
score, poster if exists, actors list and genres list)
"""
query = f"""
{get_prefix()}
SELECT ?film ?filmLabel ?directorLabel ?score
(SAMPLE(?poster) as ?poster)
(GROUP_CONCAT(DISTINCT ?actorLabel; separator=";") as ?actorsList)
(GROUP_CONCAT(DISTINCT ?genreLabel; separator=";") as ?genresList)
WHERE {{
?film wdt:P57 ?director;
wdt:P161 ?actor;
wdt:P136 ?genre;
wdt:P444 ?brutScore.
OPTIONAL {{?film wdt:P3383 ?poster }}
OPTIONAL {{?film wdt:P18 ?poster }}
OPTIONAL {{?film wdt:P154 ?poster }}
SERVICE wikibase:label {{
bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en".
?film rdfs:label ?filmLabel.
?director rdfs:label ?directorLabel.
?actor rdfs:label ?actorLabel.
?genre rdfs:label ?genreLabel.
}}
{search('?film', title)}
{search('?director', director)}
{search('?actor', actor)}
{search('?genre', genre)}
FILTER regex(?brutScore, "^[0-9]+%$")
BIND(xsd:integer(REPLACE(?brutScore, "%$", "")) AS ?score)
FILTER (?score >= {score})
}}
GROUP BY ?film ?filmLabel ?directorLabel ?score
ORDER BY DESC(?score)
LIMIT 100
"""
# print(query)
sp_wrapper = get_sparql()
sp_wrapper.setQuery(query)
sp_wrapper.setReturnFormat(JSON)
return resp_format(sp_wrapper.query().convert()['results']['bindings'])
def recommendation_topic(film: str, limit: int=20) -> list:
"""Movie recommandations based on common main subjects with selected movie
Args:
film (str): URI of the selected movie
limit (int, optional): Maximum number of results to return. Defaults to 20.
Returns:
        list: matching movies with URI, title,
        number of awards received, score on Rotten Tomatoes and a "relevance score"
"""
# In the query, we assume that movies have a score < 100
# (removes noise - movies with few reviews)
query = f"""
{get_prefix()}
SELECT ?film ?filmLabel ?topicLabel
(COUNT(DISTINCT ?award) AS ?numAwards)
?score
((?score + ?numAwards)*100/138 AS ?totalScore)
WHERE {{
{{
SELECT ?topic
WHERE {{ wd:{film} wdt:P921 ?topic. }}
}}
?film wdt:P31 wd:Q11424;
wdt:P921 ?topic;
wdt:P444 ?brutScore.
OPTIONAL {{?film wdt:P166 ?award.}}
SERVICE wikibase:label {{
bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en".
?film rdfs:label ?filmLabel.
?topic rdfs:label ?topicLabel.
}}
FILTER regex(?brutScore, "^[0-9]+%$")
BIND(xsd:integer(REPLACE(?brutScore, "%$", "")) AS ?score)
FILTER (?score != 100)
FILTER (?film != wd:{film})
}}
GROUP BY ?film ?filmLabel ?topicLabel ?score
ORDER BY DESC(?totalScore)
LIMIT {limit}
"""
# print(query)
sp_wrapper = get_sparql()
sp_wrapper.setQuery(query)
sp_wrapper.setReturnFormat(JSON)
return resp_format(sp_wrapper.query().convert()['results']['bindings'])
def recommendation_based_on(film: str, limit: int=20) -> list:
"""Movie recommandations based on same story with selected movie
Args:
film (str): URI of the selected movie
limit (int, optional): Maximum number of results to return. Defaults to 20.
Returns:
        list: matching movies with URI, title, story the movie is based on,
        number of awards received, score on Rotten Tomatoes and a "relevance score"
"""
# In the query, we assume that movies have a score < 100
# (removes noise - movies with few reviews)
query = f"""
{get_prefix()}
SELECT ?film ?filmLabel ?basedOnLabel
(COUNT(DISTINCT ?award) AS ?numAwards)
?score
((?score + ?numAwards)*100/138 AS ?totalScore)
WHERE {{
{{
SELECT ?originBasedOn
WHERE {{
OPTIONAL{{
wd:{film} wdt:P144 ?originBasedOn
}}
}}
}}
?film wdt:P31 wd:Q11424;
wdt:P136 ?genre;
wdt:P444 ?brutScore.
OPTIONAL{{?film wdt:P144 ?basedOn}}
OPTIONAL {{?film wdt:P166 ?award.}}
SERVICE wikibase:label {{
bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en".
?film rdfs:label ?filmLabel.
?basedOn rdfs:label ?basedOnLabel.
}}
FILTER (?basedOn IN (?originBasedOn))
FILTER regex(?brutScore, "^[0-9]+%$")
BIND(xsd:integer(REPLACE(?brutScore, "%$", "")) AS ?score)
FILTER (?score != 100)
FILTER(?film != wd:{film})
}}
GROUP BY ?film ?filmLabel ?score ?basedOnLabel
ORDER BY DESC(?totalScore)
LIMIT {limit}
"""
# print(query)
sp_wrapper = get_sparql()
sp_wrapper.setQuery(query)
sp_wrapper.setReturnFormat(JSON)
return resp_format(sp_wrapper.query().convert()['results']['bindings'])
def recommendation_part_of_series(film: str, limit: int=20) -> list:
"""Movie recommandations from the same series with selected movie
Args:
film (str): URI of the selected movie
limit (int, optional): Maximum number of results to return. Defaults to 20.
Returns:
        list: matching movies with URI, title, series title,
        number of awards received, score on Rotten Tomatoes and a "relevance score"
"""
# In the query, we assume that movies have a score < 100
# (removes noise - movies with few reviews)
query = f"""
{get_prefix()}
SELECT ?film ?filmLabel ?seriesLabel
(COUNT(DISTINCT ?award) AS ?numAwards)
?score
((?score + ?numAwards)*100/138 AS ?totalScore)
WHERE {{
{{
SELECT ?series
WHERE {{ wd:{film} wdt:P179 ?series. }}
}}
?film wdt:P31 wd:Q11424;
wdt:P179 ?series;
wdt:P444 ?brutScore.
OPTIONAL {{?film wdt:P166 ?award.}}
SERVICE wikibase:label {{
bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en".
?film rdfs:label ?filmLabel.
?basedOn rdfs:label ?basedOnLabel.
}}
FILTER regex(?brutScore, "^[0-9]+%$")
BIND(xsd:integer(REPLACE(?brutScore, "%$", "")) AS ?score)
FILTER (?score != 100)
FILTER(?film != wd:{film})
}}
GROUP BY ?film ?filmLabel ?seriesLabel ?score
ORDER BY DESC(?totalScore)
LIMIT {limit}
"""
print(query)
sp_wrapper = get_sparql()
sp_wrapper.setQuery(query)
sp_wrapper.setReturnFormat(JSON)
return resp_format(sp_wrapper.query().convert()['results']['bindings'])
def recommendation_genre(film: str, limit: int=20) -> list:
"""Movie recommandations based on common genres with selected movie
Args:
film (str): URI of the selected movie
limit (int, optional): Maximum number of results to return. Defaults to 20.
Returns:
        list: matching movies with URI, title,
        number of awards received, score on Rotten Tomatoes and a "relevance score"
(genre list could not be displayed because of a timeout issue with wikidata)
"""
# In the query, we assume that movies have a score < 100
# (removes noise - movies with few reviews)
query = f"""
{get_prefix()}
SELECT ?film ?filmLabel
(COUNT(DISTINCT ?award) AS ?numAwards)
?score
((?score + ?numAwards)*100/138 AS ?totalScore)
WHERE {{
{{
SELECT ?originGenre
WHERE {{ wd:{film} wdt:P136 ?originGenre . }}
}}
?film wdt:P31 wd:Q11424;
wdt:P136 ?genre;
wdt:P444 ?brutScore.
OPTIONAL {{?film wdt:P166 ?award.}}
SERVICE wikibase:label {{
bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en".
?film rdfs:label ?filmLabel.
?genre rdfs:label ?genreLabel.
}}
FILTER (?genre IN (?originGenre))
FILTER regex(?brutScore, "^[0-9]+%$")
BIND(xsd:integer(REPLACE(?brutScore, "%$", "")) AS ?score)
FILTER (?score != 100)
FILTER(?film != wd:{film})
}}
GROUP BY ?film ?filmLabel ?score
ORDER BY DESC(?totalScore)
LIMIT {limit}
"""
print(query)
sp_wrapper = get_sparql()
sp_wrapper.setQuery(query)
sp_wrapper.setReturnFormat(JSON)
return resp_format(sp_wrapper.query().convert()['results']['bindings'])
def recommendation_performer(film: str, limit: int=20) -> list:
"""Movie recommandations having the same original soundtrack artist with selected movie
Args:
film (str): URI of the selected movie
limit (int, optional): Maximum number of results to return. Defaults to 20.
Returns:
        list: matching movies with URI, title, list of performers (artists),
        number of awards received, score on Rotten Tomatoes and a "relevance score"
"""
# In the query, we assume that movies have a score < 100
# (removes noise - movies with few reviews)
query = f"""
{get_prefix()}
SELECT ?film ?filmLabel
(GROUP_CONCAT(DISTINCT ?performerLabel; separator="; ") AS ?performersList)
(COUNT(DISTINCT ?award) AS ?numAwards)
?score
((?score + ?numAwards)*100/138 AS ?totalScore)
WHERE {{
{{
SELECT ?originPerformer
WHERE {{ wd:{film} wdt:P175 ?originPerformer. }}
}}
?film wdt:P31 wd:Q11424;
wdt:P175 ?performer;
wdt:P444 ?brutScore.
OPTIONAL {{?film wdt:P166 ?award.}}
SERVICE wikibase:label {{
bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en".
?film rdfs:label ?filmLabel.
?performer rdfs:label ?performerLabel.
}}
FILTER (?performer IN (?originPerformer))
FILTER regex(?brutScore, "^[0-9]+%$")
BIND(xsd:integer(REPLACE(?brutScore, "%$", "")) AS ?score)
FILTER (?score != 100)
FILTER(?film != wd:{film})
}}
GROUP BY ?film ?filmLabel ?score
ORDER BY DESC(?totalScore)
LIMIT {limit}
"""
print(query)
sp_wrapper = get_sparql()
sp_wrapper.setQuery(query)
sp_wrapper.setReturnFormat(JSON)
return resp_format(sp_wrapper.query().convert()['results']['bindings'])
def recommendation_inspiredby(film: str, limit: int=20) -> list:
"""Movie recommandations from the same inspiration with selected movie
Args:
film (str): URI of the selected movie
limit (int, optional): Maximum number of results to return. Defaults to 20.
Returns:
        list: matching movies with URI, title, inspiration list,
        number of awards received, score on Rotten Tomatoes and a "relevance score"
"""
# In the query, we assume that movies have a score < 100
# (removes noise - movies with few reviews)
query = f"""
{get_prefix()}
SELECT ?film ?filmLabel
(GROUP_CONCAT(DISTINCT ?inspiredbyLabel; separator="; ") AS ?inspiredbyList)
(COUNT(DISTINCT ?award) AS ?numAwards)
?score
((?score + ?numAwards)*100/138 AS ?totalScore)
WHERE {{
{{
SELECT ?originInspiredby
WHERE {{ wd:{film} wdt:P941 ?originInspiredby . }}
}}
?film wdt:P31 wd:Q11424;
wdt:P941 ?inspiredby;
wdt:P444 ?brutScore.
OPTIONAL {{?film wdt:P166 ?award.}}
SERVICE wikibase:label {{
bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en".
?film rdfs:label ?filmLabel.
?inspiredby rdfs:label ?inspiredbyLabel.
}}
FILTER (?inspiredby IN (?originInspiredby))
FILTER regex(?brutScore, "^[0-9]+%$")
BIND(xsd:integer(REPLACE(?brutScore, "%$", "")) AS ?score)
FILTER (?score != 100)
FILTER(?film != wd:{film})
}}
GROUP BY ?film ?filmLabel ?score
ORDER BY DESC(?totalScore)
LIMIT {limit}
"""
print(query)
sp_wrapper = get_sparql()
sp_wrapper.setQuery(query)
sp_wrapper.setReturnFormat(JSON)
return resp_format(sp_wrapper.query().convert()['results']['bindings'])
# res = get_movie(director="<NAME>")
# pprint(res)
# res = recommendation_genre('Q44578', 10)
# pprint(res)
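# Hedged usage sketch (the search strings are illustrative assumptions; Q44578
# is the Wikidata id from the commented example above):
# results = get_movie(genre="science fiction", score=80)
# recs = recommendation_part_of_series('Q44578', limit=10)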
```
|
{
"source": "jeromebchouinard/jbnetwork",
"score": 3
}
|
#### File: jeromebchouinard/jbnetwork/jbnetworkfactory.py
```python
import math
import random
import jbnetwork as jbn
def build_star_network(size):
"""Build a star network. Returns Network object."""
network = jbn.Network()
for i in range(1, size):
network.add_link(0, i)
return network
def build_chain_network(size):
"""Build a chain network. Returns Network object."""
network = jbn.Network()
for i in range(size-1):
network.add_link(i, i+1)
return network
def build_ring_network(size):
"""Build a ring network. Returns Network object."""
network = jbn.Network()
for i in range(size-1):
network.add_link(i, i+1)
network.add_link(0, size-1)
return network
def build_random_network(size, prob):
"""Build a random (Erdos-Renyi) network. Returns Network object."""
network = jbn.Network()
for i in range(size):
network.add_node(i)
for i in range(size-1):
for j in range(i+1, size):
if random.random() < prob:
network.add_link(i, j)
return network
def build_clique_network(size):
"""Build a clique network. Returns Network object."""
network = jbn.Network()
for i in range(size-1):
for j in range(i+1, size):
network.add_link(i, j)
return network
def build_hypercube_network(size):
"""Build a hypercube network. Returns Network object."""
# pylint: disable=missing-docstring
def _rec_build_hc_net(size):
if size == 1:
return {0:{}}
network = {}
        network1 = _rec_build_hc_net(size // 2)
        for node1 in network1:
            network[node1] = network1[node1]
            network[node1 + size // 2] = {}
            for node2 in network1[node1]:
                network[node1 + size // 2][node2 + size // 2] = 1
            network[node1][node1 + size // 2] = 1
            network[node1 + size // 2][node1] = 1
return network
# Find largest power of 2 <= size
pow2size = 2**int(math.log(size, 2))
network = _rec_build_hc_net(pow2size)
    return jbn.Network(from_dict=network)
def build_grid_network(dim):
"""Build a grid network. Returns Network object.
arguments
dim -- (x, y) tuple of dimensions
"""
network = jbn.Network()
    for node in range(dim[0] * dim[1]):
        if (node+1) % dim[0] != 0:
            network.add_link(node, node+1)
        if node < (dim[1] - 1)*dim[0]:
            network.add_link(node, node+dim[0])
return network
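# Hedged usage sketch (not part of the original module):
# star = build_star_network(10)        # node 0 linked to nodes 1..9
# ring = build_ring_network(10)        # chain with the two ends joined
# grid = build_grid_network((4, 5))    # 4 x 5 lattice, links right and down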
```
#### File: jeromebchouinard/jbnetwork/jbomap.py
```python
from jbheap import KeyValueHeap
class OrderedMap(object):
"""A mutable ordered sequence with optional labels.
Labels can be any hashable value and must be unique.
Note that if integers are used as labels, accessing
by label will not work, since omap[i] where i is an int
accesses by (positional) index.
Methods
append
insert
remove
remove_by_label
index
index_by_label
Supported operators
+ (concatenates)
Indexing with []
Slice indexing with [:]
Usage
        omap = OrderedMap({(0, 'foo'): 'bar', (1, 'baz'): 'banana'})
omap[0]
> 'bar'
omap['foo']
> 'bar'
omap[1] = 'apple'
omap['baz']
> 'apple'
"""
def __init__(self, map=None):
self.omap = []
self.lmap = KeyValueHeap()
if map is not None:
for key in map:
try:
self.insert(key[0], key[1], map[key])
except IndexError:
self.insert(key[0], None, map[key])
def __add__(self, operand2):
pass
def __len__(self):
pass
def __iter__(self):
pass
def append(self, val, label=None):
pass
def insert(self, ii, val, label=None):
if not hash(label) in self.lmap.keys():
self.omap.insert(ii, (val, label))
if label is not None:
self.lmap.insert((hash(label), val))
else:
raise AttributeError('Label already exists.')
def remove(self, ii):
pass
def remove_by_label(self, label):
pass
def index(self, ii):
pass
def index_by_label(self, label):
pass
```
#### File: jeromebchouinard/jbnetwork/jbutils.py
```python
import time
from pprint import pprint
import math
import random
def partition(L, v):
"""
Partition list L at value V.
"""
left = []
right = []
for i in range(len(L)):
if L[i] < v:
left.append(L[i])
elif L[i] > v:
right.append(L[i])
return (left, v, right)
def top_k(L, k):
"""
Find the top k elements in list L.
"""
i = int(random.random() * len(L))
(left, v, right) = partition(L, L[i])
if len(left) == k:
return left
if len(left) == k-1:
return left + [v]
if len(left) < k:
return left + [v] + top_k(right, k - (len(left) + 1))
else:
return top_k(left, k)
def timeit(f):
"""
Modify a function to print cpu and wall-clock elapsed time when called.
Can be used as a decorator:
@timeit
def func(x):
...
"""
def g(*args, **kwargs):
start_etime = time.perf_counter()
start_cputime = time.process_time()
rvalue = f(*args, **kwargs)
end_etime = time.perf_counter()
end_cputime = time.process_time()
print('elapsed time (s): ', end_etime - start_etime)
print('cpu time (s)', end_cputime - start_cputime)
return rvalue
return g
def profile(func, input_gen, max_time=5, max_n=2**20, start_n=1, keep_returns=False):
"""
Time a function for different input sizes and check if O(n^2), O(n), or O(log(n))
"""
runtimes = []
returns = [] if keep_returns else None
last_runtime = 0
n = start_n
    input_sizes = []
    # max_time is in seconds, matching the time.process_time() measurements below
    while last_runtime <= max_time and n <= max_n:
inp = input_gen(n)
start_time = time.process_time()
rvalue = func(inp)
end_time = time.process_time()
last_runtime = end_time - start_time
runtimes.append(last_runtime)
input_sizes.append(n)
if keep_returns:
returns.append(rvalue)
n *= 2
lognfactors = []
nfactors = []
n2factors = []
for i in range(1,len(runtimes)):
lognfactors.append(runtimes[i]-runtimes[i-1])
nfactors.append(runtimes[i]/runtimes[i-1])
n2factors.append(math.sqrt(runtimes[i])/math.sqrt(runtimes[i-1]))
print('If func is O(log(n)) these numbers should be the same:')
pprint(lognfactors)
print('\n')
print('If func is O(n) these numbers should be the same:')
pprint(nfactors)
print('\n')
print('If func is O(n^2) these numbers should be the same: ')
pprint(n2factors)
print('\n')
return (input_sizes, runtimes, returns)
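# Hedged usage sketch (not part of the original module): profiles sorted() on
# lists of random floats; the O(n)/O(n^2) hints printed by profile() are only
# rough indicators for an O(n log n) sort.
# sizes, times, _ = profile(sorted, lambda n: [random.random() for _ in range(n)], max_time=2)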
```
|
{
"source": "JeromeBlanchet/Neuraxle",
"score": 3
}
|
#### File: neuraxle/hyperparams/distributions.py
```python
import copy
import math
import random
import sys
from abc import abstractmethod, ABCMeta
from typing import List
import numpy as np
class HyperparameterDistribution(metaclass=ABCMeta):
"""Base class for other hyperparameter distributions."""
def __init__(self):
"""
Create a HyperparameterDistribution. This method should still be called with super if it gets overriden.
"""
self.first_id = id(self)
@abstractmethod
def rvs(self):
"""
Sample the random variable.
:return: The randomly sampled value.
"""
pass
def narrow_space_from_best_guess(self, best_guess, kept_space_ratio: float = 0.0) -> 'HyperparameterDistribution':
"""
Takes a value that is estimated to be the best one of the space, and restrict the space near that value.
By default, this function will completely replace the returned value by the new guess if not overriden.
:param best_guess: the value towards which we want to narrow down the space.
:param kept_space_ratio: what proportion of the space is kept. Should be between 0.0 and 1.0. Default is to keep only the best_guess (0.0).
:return: a new HyperparameterDistribution object that has been narrowed down.
"""
return FixedHyperparameter(best_guess).was_narrowed_from(kept_space_ratio, self)
def was_narrowed_from(
self, kept_space_ratio: float, original_hp: 'HyperparameterDistribution'
) -> 'HyperparameterDistribution':
"""
Keep track of the original distribution to restore it.
:param kept_space_ratio: the ratio which made the current object narrower than the ``original_hp``.
:param original_hp: The original HyperparameterDistribution, which will be kept in a private variable for an eventual restore.
:return: self.
"""
self.kept_space_ratio_trace = (
self.get_current_narrowing_value() *
kept_space_ratio *
original_hp.get_current_narrowing_value()
)
self.original_hp: HyperparameterDistribution = original_hp.unnarrow()
return self
def get_current_narrowing_value(self):
if not hasattr(self, 'kept_space_ratio_trace'):
self.kept_space_ratio_trace: float = 1.0
return self.kept_space_ratio_trace
def unnarrow(self) -> 'HyperparameterDistribution':
"""
Return the original distribution before narrowing of the distribution. If the distribution was never narrowed,
will return a copy of self.
:return: the original HyperparameterDistribution before narrowing, or else self if the distribution is virgin.
"""
if not hasattr(self, 'original_hp'):
return copy.deepcopy(self)
return copy.deepcopy(self.original_hp.unnarrow())
def __eq__(self, other):
return self.first_id == other.first_id
class FixedHyperparameter(HyperparameterDistribution):
"""This is an hyperparameter that won't change again, but that is still expressed as a distribution."""
def __init__(self, value):
"""
Create a still hyperparameter
:param value: what will be returned by calling ``.rvs()``.
"""
self.value = value
super(FixedHyperparameter, self).__init__()
def rvs(self):
"""
Sample the non-random anymore value.
:return: the value given at creation.
"""
return self.value
# TODO: Mixin this or something:
# class DelayedAdditionOf(MalleableDistribution):
# """A HyperparameterDistribution (MalleableDistribution mixin) that """
#
# def __init__(self, *dists):
# self.dists = dists
#
# def rvs(self):
# rvss = [d.rvs if hasattr(d, 'rvs') else d for d in self.dists]
# return sum(rvss)
#
#
# class MalleableDistribution(metaclass=ABCMeta):
# """An hyperparameter distribution to which it's possible to do additional math using defaut python operators."""
#
# def __add__(self, other):
# return DelayedAdditionOf(self, other)
#
# max min + - / * % ** // == != < > <= >=
#
class Boolean(HyperparameterDistribution):
"""Get a random boolean hyperparameter."""
def rvs(self):
"""
Get a random True or False.
:return: True or False (random).
"""
return random.choice([True, False])
class Choice(HyperparameterDistribution):
"""Get a random value from a choice list of possible value for this hyperparameter.
When narrowed, the choice will only collapse to a single element when narrowed enough.
For example, if there are 4 items in the list, only at a narrowing value of 0.25 that
the first item will be kept alone.
"""
def __init__(self, choice_list: List):
"""
Create a random choice hyperparameter from the given list.
:param choice_list: a list of values to sample from.
"""
self.choice_list = choice_list
super(Choice, self).__init__()
def rvs(self):
"""
Get one of the items randomly.
:return: one of the items of the list.
"""
return random.choice(self.choice_list)
def narrow_space_from_best_guess(self, best_guess, kept_space_ratio: float = 0.0) -> HyperparameterDistribution:
"""
Will narrow the space. If the cumulative kept_space_ratio gets to be under or equal to 1/len(choice_list),
then the list is crunched to a single item as a FixedHyperparameter to reflect this narrowing.
So once a small enough kept_space_ratio is reached, the list becomes a fixed unique item from the best guess.
Otherwise, a deepcopy of self is returned.
:param best_guess: the best item of the list to keep if truly narrowing.
:param kept_space_ratio: the ratio of the space to keep.
:return: a deepcopy of self, or else a FixedHyperparameter of the best_guess.
"""
new_narrowing = self.get_current_narrowing_value() * kept_space_ratio
if len(self.choice_list) == 0 or len(self.choice_list) == 1 or new_narrowing <= 1.0 / len(self.choice_list):
return FixedHyperparameter(best_guess).was_narrowed_from(kept_space_ratio, self)
return copy.deepcopy(self).was_narrowed_from(kept_space_ratio, self)
def __len__(self):
"""
Return the number of choices.
:return: the number of choices.
"""
return len(self.choice_list)
class PriorityChoice(HyperparameterDistribution):
"""Get a random value from a choice list of possible value for this hyperparameter.
The first parameters are kept until the end when the list is narrowed (it is narrowed progressively),
unless there is a best guess that surpasses some of the top choices.
"""
def __init__(self, choice_list: List):
"""
Create a random choice hyperparameter from the given list (choice_list).
The first parameters in the choice_list will be kept longer when narrowing the space.
:param choice_list: a list of values to sample from. First placed, first kept when space is narrowed.
"""
self.choice_list = choice_list
super(PriorityChoice, self).__init__()
def rvs(self):
"""
Get one of the items randomly.
:return: one of the items of the list.
"""
return random.choice(self.choice_list)
def narrow_space_from_best_guess(self, best_guess, kept_space_ratio: float = 0.0) -> HyperparameterDistribution:
"""
Will narrow the space. If the cumulative kept_space_ratio gets to be under or equal to 1-1/len(choice_list),
then the list is crunched to discard the last items to reflect this narrowing.
After a few narrowing (or a big one), the list may become a FixedHyperparameter.
Otherwise if the list is unchanged, a deepcopy of self is returned.
:param best_guess: the best item of the list, which will be brought back as the first item.
:param kept_space_ratio: the ratio of the space to keep.
:return: a deepcopy of self, or a subchoice of self, or else a FixedHyperparameter of the best_guess.
"""
new_size = int(len(self) * kept_space_ratio + sys.float_info.epsilon)
if (
len(self.choice_list) == 0
or len(self.choice_list) == 1
or new_size <= 1
or kept_space_ratio <= 1.0 / len(self.choice_list)
):
return FixedHyperparameter(best_guess).was_narrowed_from(kept_space_ratio, self)
# Bring best_guess to front
idx = self.choice_list.index(best_guess)
del self.choice_list[idx]
self.choice_list = [best_guess] + self.choice_list
# Narrowing of the list.
maybe_reduced_list = self.choice_list[:new_size]
return PriorityChoice(maybe_reduced_list).was_narrowed_from(kept_space_ratio, self)
def __len__(self):
"""
Return the number of choices.
:return: the number of choices.
"""
return len(self.choice_list)
class WrappedHyperparameterDistributions(HyperparameterDistribution):
def __init__(self, hd: HyperparameterDistribution = None, hds: List[HyperparameterDistribution] = None):
"""
Create a wrapper that will surround another HyperparameterDistribution.
The wrapper might use one (hd) and/or many (hds) HyperparameterDistribution depending on the argument(s) used.
:param hd: the other HyperparameterDistribution to wrap.
:param hds: the others HyperparameterDistribution to wrap.
"""
self.hd: HyperparameterDistribution = hd
self.hds: List[HyperparameterDistribution] = hds
super(WrappedHyperparameterDistributions, self).__init__()
def __repr__(self):
return self.__class__.__name__ + "(" + repr(self.hd) + ", hds=" + repr(self.hds) + ")"
def __str__(self):
return self.__class__.__name__ + "(" + str(self.hd) + ", hds=" + str(self.hds) + ")"
class Quantized(WrappedHyperparameterDistributions):
"""A quantized wrapper for another distribution: will round() the rvs number."""
def rvs(self) -> int:
"""
Will return an integer, rounded from the output of the previous distribution.
:return: an integer.
"""
return round(self.hd.rvs())
def narrow_space_from_best_guess(self, best_guess, kept_space_ratio: float = 0.5) -> 'Quantized':
"""
Will narrow the underlying distribution and re-wrap it under a Quantized.
:param best_guess: the value towards which we want to narrow down the space.
:param kept_space_ratio: what proportion of the space is kept. Default is to keep half the space (0.5).
:return:
"""
return Quantized(
self.hd.narrow_space_from_best_guess(best_guess, kept_space_ratio)
).was_narrowed_from(kept_space_ratio, self)
class RandInt(HyperparameterDistribution):
"""Get a random integer within a range"""
def __init__(self, min_included: int, max_included: int):
"""
Create a quantized random uniform distribution.
A random integer between the two values inclusively will be returned.
:param min_included: minimum integer, included.
:param max_included: maximum integer, included.
"""
self.min_included = min_included
self.max_included = max_included
super(RandInt, self).__init__()
def rvs(self) -> int:
"""
Will return an integer in the specified range as specified at creation.
:return: an integer.
"""
return random.randint(self.min_included, self.max_included)
def narrow_space_from_best_guess(self, best_guess, kept_space_ratio: float = 0.5) -> HyperparameterDistribution:
"""
Will narrow the underlying distribution towards the best guess.
        :param best_guess: the value towards which we want to narrow down the space.
:param kept_space_ratio: what proportion of the space is kept. Default is to keep half the space (0.5).
:return: a new HyperparameterDistribution that has been narrowed down.
"""
lost_space_ratio = 1.0 - kept_space_ratio
new_min_included = round(self.min_included * kept_space_ratio + best_guess * lost_space_ratio)
new_max_included = round(self.max_included * kept_space_ratio + best_guess * lost_space_ratio)
if new_max_included <= new_min_included or kept_space_ratio == 0.0:
return FixedHyperparameter(best_guess).was_narrowed_from(kept_space_ratio, self)
return RandInt(new_min_included, new_max_included).was_narrowed_from(kept_space_ratio, self)
class Uniform(HyperparameterDistribution):
"""Get a uniform distribution."""
    def __init__(self, min_included: float, max_included: float):
        """
        Create a random uniform distribution.
        A random float between the two values somehow inclusively will be returned.
        :param min_included: minimum value, included.
        :param max_included: maximum value, might be included - for more info, see https://docs.python.org/2/library/random.html#random.uniform
"""
self.min_included = min_included
self.max_included = max_included
super(Uniform, self).__init__()
def rvs(self) -> float:
"""
Will return a float value in the specified range as specified at creation.
:return: a float.
"""
return random.random() * (self.max_included - self.min_included) + self.min_included
def narrow_space_from_best_guess(self, best_guess, kept_space_ratio: float = 0.5) -> HyperparameterDistribution:
"""
Will narrow the underlying distribution towards the best guess.
        :param best_guess: the value towards which we want to narrow down the space.
:param kept_space_ratio: what proportion of the space is kept. Default is to keep half the space (0.5).
:return: a new HyperparameterDistribution that has been narrowed down.
"""
lost_space_ratio = 1.0 - kept_space_ratio
new_min_included = self.min_included * kept_space_ratio + best_guess * lost_space_ratio
new_max_included = self.max_included * kept_space_ratio + best_guess * lost_space_ratio
if new_max_included <= new_min_included or kept_space_ratio == 0.0:
return FixedHyperparameter(best_guess).was_narrowed_from(kept_space_ratio, self)
return Uniform(new_min_included, new_max_included).was_narrowed_from(kept_space_ratio, self)
class LogUniform(HyperparameterDistribution):
"""Get a LogUniform distribution.
For example, this is good for neural networks' learning rates: that vary exponentially."""
def __init__(self, min_included: float, max_included: float):
"""
        Create a random log-uniform distribution.
        A random float between the two values inclusively will be returned.
        :param min_included: minimum value, should be somehow included.
        :param max_included: maximum value, should be somehow included.
"""
self.log2_min_included = math.log2(min_included)
self.log2_max_included = math.log2(max_included)
super(LogUniform, self).__init__()
def rvs(self) -> float:
"""
Will return a float value in the specified range as specified at creation.
:return: a float.
"""
return 2 ** random.uniform(self.log2_min_included, self.log2_max_included)
def narrow_space_from_best_guess(self, best_guess, kept_space_ratio: float = 0.5) -> HyperparameterDistribution:
"""
Will narrow, in log space, the distribution towards the new best_guess.
        :param best_guess: the value towards which we want to narrow down the space. Should lie within the distribution's current range.
:param kept_space_ratio: what proportion of the space is kept. Default is to keep half the space (0.5).
:return: a new HyperparameterDistribution that has been narrowed down.
"""
log2_best_guess = math.log2(best_guess)
lost_space_ratio = 1.0 - kept_space_ratio
new_min_included = self.log2_min_included * kept_space_ratio + log2_best_guess * lost_space_ratio
new_max_included = self.log2_max_included * kept_space_ratio + log2_best_guess * lost_space_ratio
if new_max_included <= new_min_included or kept_space_ratio == 0.0:
return FixedHyperparameter(best_guess).was_narrowed_from(kept_space_ratio, self)
return LogUniform(2 ** new_min_included, 2 ** new_max_included).was_narrowed_from(kept_space_ratio, self)
class Normal(HyperparameterDistribution):
"""Get a normal distribution."""
def __init__(self, mean: float, std: float,
hard_clip_min: float = None, hard_clip_max: float = None):
"""
Create a normal distribution from mean and standard deviation.
:param mean: the most common value to pop
:param std: the standard deviation (that is, the sqrt of the variance).
:param hard_clip_min: if not none, rvs will return max(result, hard_clip_min).
        :param hard_clip_max: if not none, rvs will return min(result, hard_clip_max).
        """
        self.mean = mean
self.std = std
self.hard_clip_min = hard_clip_min
self.hard_clip_max = hard_clip_max
super(Normal, self).__init__()
def rvs(self) -> float:
"""
Will return a float value in the specified range as specified at creation.
:return: a float.
"""
result = float(np.random.normal(self.mean, self.std))
if not math.isfinite(result):
return self.rvs()
# TODO: replace hard_clip with malleable max and min? also remove in doc if so (search for "hard clip").
if self.hard_clip_max is not None:
result = min(result, self.hard_clip_max)
if self.hard_clip_min is not None:
result = max(result, self.hard_clip_min)
return float(result)
def narrow_space_from_best_guess(self, best_guess, kept_space_ratio: float = 0.5) -> HyperparameterDistribution:
"""
Will narrow the distribution towards the new best_guess.
The mean will move towards the new best guess, and the standard deviation
will be multiplied by the kept_space_ratio.
The hard clip limit is unchanged.
        :param best_guess: the value towards which we want to narrow down the space's mean. Should lie within the distribution's current range.
:param kept_space_ratio: what proportion of the space is kept. Default is to keep half the space (0.5).
:return: a new HyperparameterDistribution that has been narrowed down.
"""
lost_space_ratio = 1.0 - kept_space_ratio
if isinstance(self.mean, tuple):
self.mean = self.mean[0]
new_mean = self.mean * kept_space_ratio + best_guess * lost_space_ratio
new_std = self.std * kept_space_ratio
if new_std <= 0.0:
return FixedHyperparameter(best_guess).was_narrowed_from(kept_space_ratio, self)
return Normal(
new_mean, new_std, self.hard_clip_min, self.hard_clip_max
).was_narrowed_from(kept_space_ratio, self)
class LogNormal(HyperparameterDistribution):
"""Get a LogNormal distribution."""
def __init__(self, log2_space_mean: float, log2_space_std: float,
hard_clip_min: float = None, hard_clip_max: float = None):
"""
Create a LogNormal distribution.
:param log2_space_mean: the most common value to pop, but before taking 2**value.
:param log2_space_std: the standard deviation of the most common value to pop, but before taking 2**value.
:param hard_clip_min: if not none, rvs will return max(result, hard_clip_min). This value is not checked in logspace (so it is checked after the exp).
        :param hard_clip_max: if not none, rvs will return min(result, hard_clip_max). This value is not checked in logspace (so it is checked after the exp).
"""
self.log2_space_mean = log2_space_mean
self.log2_space_std = log2_space_std
self.hard_clip_min = hard_clip_min
self.hard_clip_max = hard_clip_max
super(LogNormal, self).__init__()
def rvs(self) -> float:
"""
Will return a float value in the specified range as specified at creation.
Note: the range at creation was in log space. The return value is after taking an exponent.
:return: a float.
"""
result = 2 ** float(np.random.normal(self.log2_space_mean, self.log2_space_std))
if not math.isfinite(result):
return self.rvs()
if self.hard_clip_max is not None:
result = min(result, self.hard_clip_max)
if self.hard_clip_min is not None:
result = max(result, self.hard_clip_min)
return float(result)
def narrow_space_from_best_guess(self, best_guess, kept_space_ratio: float = 0.5) -> HyperparameterDistribution:
"""
Will narrow the distribution towards the new best_guess.
The log2_space_mean (log space mean) will move, in log space, towards the new best guess, and the
log2_space_std (log space standard deviation) will be multiplied by the kept_space_ratio.
        :param best_guess: the value towards which we want to narrow down the space's mean. Should lie within the distribution's current range.
:param kept_space_ratio: what proportion of the space is kept. Default is to keep half the space (0.5).
:return: a new HyperparameterDistribution that has been narrowed down.
"""
log2_best_guess = math.log2(best_guess)
lost_space_ratio = 1.0 - kept_space_ratio
new_mean = self.log2_space_mean * kept_space_ratio + log2_best_guess * lost_space_ratio
new_std = self.log2_space_std * kept_space_ratio
if new_std <= 0.0:
return FixedHyperparameter(best_guess).was_narrowed_from(kept_space_ratio, self)
        return LogNormal(
new_mean, new_std, self.hard_clip_min, self.hard_clip_max
).was_narrowed_from(kept_space_ratio, self)
```
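For orientation, here is a small usage sketch of the distributions above (not part of the original file; the import path follows the one used in space.py below):
```python
# Illustrative only: sample and narrow the distributions defined above.
from neuraxle.hyperparams.distributions import RandInt, LogUniform

n_estimators = RandInt(10, 100)
learning_rate = LogUniform(0.0001, 0.1)

print(n_estimators.rvs())   # a random int in [10, 100]
print(learning_rate.rvs())  # a random float, log-uniform between 0.0001 and 0.1

# Keep half of the space, pulled (in log space) towards a best guess.
narrowed = learning_rate.narrow_space_from_best_guess(0.001, kept_space_ratio=0.5)
print(narrowed.rvs())
```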
#### File: neuraxle/hyperparams/space.py
```python
from collections import OrderedDict
from neuraxle.hyperparams.distributions import HyperparameterDistribution
PARAMS_SPLIT_SEQ = "__"
def nested_dict_to_flat(nested_hyperparams, dict_ctor=OrderedDict):
"""
Convert a nested hyperparameter dictionary to a flat one.
:param nested_hyperparams: a nested hyperparameter dictionary.
:param dict_ctor: ``OrderedDict`` by default. Will use this as a class to create the new returned dict.
:return: a flat hyperparameter dictionary.
"""
ret = dict_ctor()
for k, v in nested_hyperparams.items():
if isinstance(v, dict) or isinstance(v, OrderedDict) or isinstance(v, dict_ctor):
_ret = nested_dict_to_flat(v)
for key, val in _ret.items():
ret[k + PARAMS_SPLIT_SEQ + key] = val
else:
ret[k] = v
return ret
def flat_to_nested_dict(flat_hyperparams, dict_ctor=OrderedDict):
"""
Convert a flat hyperparameter dictionary to a nested one.
:param flat_hyperparams: a flat hyperparameter dictionary.
:param dict_ctor: ``OrderedDict`` by default. Will use this as a class to create the new returned dict.
:return: a nested hyperparameter dictionary.
"""
pre_ret = dict_ctor()
ret = dict_ctor()
for k, v in flat_hyperparams.items():
k, _, key = k.partition(PARAMS_SPLIT_SEQ)
if len(key) > 0:
if k not in pre_ret.keys():
pre_ret[k] = dict_ctor()
pre_ret[k][key] = v
else:
ret[k] = v
for k, v in pre_ret.items():
ret[k] = flat_to_nested_dict(v)
return ret
class HyperparameterSamples(OrderedDict):
"""Wraps an hyperparameter nested dict or flat dict, and offer a few more functions.
This can be set on a Pipeline with the method ``set_hyperparams``.
HyperparameterSamples are often the result of calling ``.rvs()`` on an HyperparameterSpace."""
def to_flat(self) -> 'HyperparameterSamples':
"""
Will create an equivalent flat HyperparameterSamples.
:return: an HyperparameterSamples like self, flattened.
"""
return nested_dict_to_flat(self, dict_ctor=HyperparameterSamples)
def to_nested_dict(self) -> 'HyperparameterSamples':
"""
Will create an equivalent nested dict HyperparameterSamples.
:return: an HyperparameterSamples like self, as a nested dict.
"""
return flat_to_nested_dict(self, dict_ctor=HyperparameterSamples)
def to_flat_as_dict_primitive(self) -> dict:
"""
Will create an equivalent flat HyperparameterSpace, as a dict.
:return: an HyperparameterSpace like self, flattened.
"""
return nested_dict_to_flat(self, dict_ctor=dict)
def to_nested_dict_as_dict_primitive(self) -> dict:
"""
Will create an equivalent nested dict HyperparameterSpace, as a dict.
:return: a nested primitive dict type of self.
"""
return flat_to_nested_dict(self, dict_ctor=dict)
def to_flat_as_ordered_dict_primitive(self) -> OrderedDict:
"""
Will create an equivalent flat HyperparameterSpace, as a dict.
:return: an HyperparameterSpace like self, flattened.
"""
return nested_dict_to_flat(self, dict_ctor=OrderedDict)
def to_nested_dict_as_ordered_dict_primitive(self) -> OrderedDict:
"""
Will create an equivalent nested dict HyperparameterSpace, as a dict.
:return: a nested primitive dict type of self.
"""
return flat_to_nested_dict(self, dict_ctor=OrderedDict)
class HyperparameterSpace(HyperparameterSamples):
"""Wraps an hyperparameter nested dict or flat dict, and offer a few more functions to process
all contained HyperparameterDistribution.
This can be set on a Pipeline with the method ``set_hyperparams_space``.
Calling ``.rvs()`` on an ``HyperparameterSpace`` results in ``HyperparameterSamples``."""
    def rvs(self) -> 'HyperparameterSamples':
"""
Sample the space of random variables.
:return: a random HyperparameterSamples, sampled from a point of the present HyperparameterSpace.
"""
new_items = []
for k, v in self.items():
if isinstance(v, HyperparameterDistribution) or isinstance(v, HyperparameterSpace):
v = v.rvs()
new_items.append((k, v))
        return HyperparameterSamples(new_items)
def narrow_space_from_best_guess(
self, best_guesses: 'HyperparameterSpace', kept_space_ratio: float = 0.5
) -> 'HyperparameterSpace':
"""
Takes samples estimated to be the best ones of the space as of yet, and restrict the whole space towards that.
        :param best_guesses: sampled HyperparameterSpace (the result of rvs on each parameter, but still stored as a HyperparameterSpace).
:param kept_space_ratio: what proportion of the space is kept. Should be between 0.0 and 1.0. Default is 0.5.
:return: a new HyperparameterSpace containing the narrowed HyperparameterDistribution objects.
"""
new_items = []
for k, v in self.items():
if isinstance(v, HyperparameterDistribution) or isinstance(v, HyperparameterSpace):
best_guess_v = best_guesses[k]
v = v.narrow_space_from_best_guess(best_guess_v, kept_space_ratio)
new_items.append((k, v))
return HyperparameterSpace(new_items)
def unnarrow(self) -> 'HyperparameterSpace':
"""
Return the original space before narrowing of the distribution. If the distribution was never narrowed,
the values in the dict will be copies.
:return: the original HyperparameterSpace before narrowing.
"""
new_items = []
for k, v in self.items():
if isinstance(v, HyperparameterDistribution) or isinstance(v, HyperparameterSpace):
v = v.unnarrow()
new_items.append((k, v))
return HyperparameterSpace(new_items)
def to_flat(self) -> 'HyperparameterSpace':
"""
Will create an equivalent flat HyperparameterSpace.
:return: an HyperparameterSpace like self, flattened.
"""
return nested_dict_to_flat(self, dict_ctor=HyperparameterSpace)
def to_nested_dict(self) -> 'HyperparameterSpace':
"""
Will create an equivalent nested dict HyperparameterSpace.
:return: an HyperparameterSpace like self, as a nested dict.
"""
return flat_to_nested_dict(self, dict_ctor=HyperparameterSpace)
```
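A short, illustrative sketch of how these containers are typically used together (not part of the original file):
```python
# Illustrative only: build a nested space, flatten it, sample it, narrow it.
from neuraxle.hyperparams.distributions import RandInt, Uniform
from neuraxle.hyperparams.space import HyperparameterSpace

space = HyperparameterSpace({
    'step1': {'n_components': RandInt(2, 10)},
    'step2': {'alpha': Uniform(0.0, 1.0)},
}).to_flat()

print(list(space.keys()))  # ['step1__n_components', 'step2__alpha']

samples = space.rvs()  # concrete sampled values, keyed like the space
narrowed_space = space.narrow_space_from_best_guess(samples, kept_space_ratio=0.5)
```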
#### File: testing/steps/test_utils.py
```python
import copy
import numpy as np
from neuraxle.pipeline import Pipeline
from neuraxle.steps.util import TapeCallbackFunction, TransformCallbackStep, StepClonerForEachDataInput
from neuraxle.union import Identity, AddFeatures
def test_tape_callback():
expected_tape = ["1", "2", "3", "a", "b", "4"]
tape = TapeCallbackFunction()
p = Pipeline([
Identity(),
TransformCallbackStep(tape.callback, ["1"]),
TransformCallbackStep(tape.callback, ["2"]),
TransformCallbackStep(tape.callback, ["3"]),
AddFeatures([
TransformCallbackStep(tape.callback, ["a"]),
TransformCallbackStep(tape.callback, ["b"]),
]),
TransformCallbackStep(tape.callback, ["4"]),
Identity()
])
p.fit_transform(np.ones((1, 1)))
assert tape.get_name_tape() == expected_tape
def test_step_cloner():
tape = TapeCallbackFunction()
data = [[1], [2], [3]]
sc = StepClonerForEachDataInput(TransformCallbackStep(tape, ["-"]), copy_op=copy.copy)
sc.fit_transform(data)
print(tape)
print(tape.get_name_tape())
print(tape.get_data())
assert tape.get_data() == data
assert tape.get_name_tape() == ["-"] * 3
```
|
{
"source": "JeromeBriot/fusion360-open-folders",
"score": 2
}
|
#### File: JeromeBriot/fusion360-open-folders/OpenFolders.py
```python
import adsk.core, adsk.fusion, traceback # pylint: disable=import-error
import platform
import os
import plistlib
import subprocess
import json
import re
thisAddinName = 'OpenFolders'
thisAddinTitle = 'Open Folders'
thisAddinVersion = '0.4.0'
thisAddinAuthor = '<NAME>'
thisAddinContact = '<EMAIL>'
thisFilePath = os.path.join(os.path.dirname(os.path.realpath(__file__)))
app = adsk.core.Application.get()
ui = app.userInterface
# https://forums.autodesk.com/t5/fusion-360-api-and-scripts/api-bug-cannot-click-menu-items-in-nested-dropdown/m-p/9669144#M10876
nestedMenuBugFixed = False
showUndocumentedFolders = True
controls = {
'titles': [],
'ids': [],
'parentsIds': [],
'types': [],
'paths': [],
'separators': [],
'icons': []
}
undocumentedControls = {
'titles': [],
'ids': [],
'parentsIds': [],
'types': [],
'paths': [],
'separators': [],
'icons': []
}
handlers = []
def getDefaultControls():
global controls
if platform.system() == 'Windows':
desktopPath = os.path.join(os.getenv('USERPROFILE'), 'Desktop')
# https://stackoverflow.com/questions/2014554/find-the-newest-folder-in-a-directory-in-python
directory = os.path.join(os.getenv('LOCALAPPDATA'), 'Autodesk', 'webdeploy', 'production')
fusion360Install = max([os.path.join(directory,d) for d in os.listdir(directory)], key=os.path.getctime)
fusion360ApiCpp = os.path.join(fusion360Install, 'CPP')
fusion360ApiPython = os.path.join(fusion360Install, 'Api', 'Python')
fusion360Python = os.path.join(fusion360Install, 'Python')
autodeskLocal = os.path.join(os.getenv('LOCALAPPDATA'), 'Autodesk')
autodeskRoaming = os.path.join(os.getenv('APPDATA'), 'Autodesk')
controls = {
'titles': [
'Install',
'API',
'C++',
'Python',
'Python',
'Autodesk (Roaming)',
'Autodesk (Local)',
'Desktop',
'Appdata (Roaming)',
'Appdata (Local)',
'Temp',
'Preferences'
],
'ids': [
'Fusion360Install',
'Fusion360Api',
'Fusion360ApiCpp',
'Fusion360ApiPython',
'Fusion360Python',
'AutodeskRoaming',
'AutodeskLocal',
'WindowsDesktop',
'WindowsAppdataRoaming',
'WindowsAppdataLocal',
'WindowsTemp',
'Preferences'
],
'parentsIds': [
'root',
'root',
'Fusion360Api',
'Fusion360Api',
'root',
'root',
'root',
'root',
'root',
'root',
'root',
'root'
],
'types': [
'command',
'dropdown',
'command',
'command',
'command',
'command',
'command',
'command',
'command',
'command',
'command',
'command'
],
'paths': [
fusion360Install,
None,
fusion360ApiCpp,
fusion360ApiPython,
fusion360Python,
autodeskRoaming,
autodeskLocal,
os.path.join(os.getenv('USERPROFILE'), 'Desktop'),
os.path.join(os.getenv('APPDATA')),
os.path.join(os.getenv('LOCALAPPDATA')),
os.path.join(os.getenv('TMP')),
getUserDataPath()
],
'separators': [False, False, False, False, True, False, True, False, False, False, True, True],
'icons': [
'fusion360',
'fusion360',
'fusion360',
'fusion360',
'fusion360',
'autodesk',
'autodesk',
'windows',
'windows',
'windows',
'windows',
''
]}
# if not nestedMenuBugFixed:
# controls['separators'][1] = True
else:
userPath = os.path.expanduser('~')
desktopPath = os.path.join(userPath, 'Desktop')
autodeskPath = os.path.join(userPath, 'Library', 'Application Support', 'Autodesk')
fusionAppPath = os.path.realpath(os.path.join(autodeskPath, 'webdeploy', 'production', 'Autodesk Fusion 360.app'))
fusion360Install = os.path.join(fusionAppPath, 'Contents')
fusion360ApiCpp = os.path.join(fusion360Install, 'Libraries', 'Neutron', 'CPP')
fusion360ApiPython = os.path.join(fusion360Install, 'Api', 'Python')
fusion360Python = os.path.join(fusion360Install, 'Frameworks', 'Python.framework', 'Versions')
controls = {
'titles': [
'Install',
'API',
'C++',
'Python',
'Python',
'Autodesk',
'Desktop',
'Preferences'
],
'ids': [
'Fusion360Install',
'Fusion360Api',
'Fusion360ApiCpp',
'Fusion360ApiPython',
'Fusion360Python',
'Autodesk',
'Desktop',
'Preferences'
],
'parentsIds': [
'root',
'root',
'Fusion360Api',
'Fusion360Api',
'root',
'root',
'root',
'root'
],
'types': [
'command',
'dropdown',
'command',
'command',
'command',
'command',
'command',
'command'
],
'paths': [
fusion360Install,
None,
fusion360ApiCpp,
fusion360ApiPython,
fusion360Python,
autodeskPath,
desktopPath,
getUserDataPath()
],
'separators': [False, False, False, False, True, True, True, True],
'icons': [
'fusion360',
'fusion360',
'fusion360',
'fusion360',
'fusion360',
'autodesk',
'macos',
''
]
}
# if not nestedMenuBugFixed:
# controls['separators'][1] = True
def getUndocumentedControls():
global undocumentedControls
if not nestedMenuBugFixed:
undocumentedControls = {
'titles': [],
'ids' : [],
'parentsIds': [],
'types': [],
'paths': [],
'separators': [],
'icons': []
}
else:
idx = 4
pathsDict = json.loads(app.executeTextCommand('Paths.Get'))
if nestedMenuBugFixed:
controls['titles'].insert(idx, 'Undocumented')
controls['ids'].insert(idx, 'Undocumented')
controls['parentsIds'].insert(idx, 'root')
controls['types'].insert(idx, 'dropdown')
controls['paths'].insert(idx, None)
controls['separators'].insert(idx, True)
controls['icons'].insert(idx, 'fusion360')
for key in pathsDict.keys():
if key != 'isInstalledBuild':
pn = ' '.join(re.findall(r'[A-Z](?:[a-z]+|[A-Z]*(?=[A-Z]|$))', key[0].upper() + key[1:]))
if pathsDict[key].startswith('Auto-save location is '):
pp = pathsDict[key].replace('Auto-save location is ', '')
else:
pp = pathsDict[key]
if key == 'AppLogFilePath':
pp = os.path.dirname(pp)
if not pp.endswith('/'):
pp += '/'
if nestedMenuBugFixed:
idx += 1
controls['titles'].insert(idx, pn)
controls['ids'].insert(idx, pn.replace(' ', ''))
controls['parentsIds'].insert(idx, 'Undocumented')
controls['types'].insert(idx, 'command')
controls['paths'].insert(idx, pp)
controls['separators'].insert(idx, False)
controls['icons'].insert(idx, 'fusion360')
else:
undocumentedControls['titles'].append(pn)
undocumentedControls['ids'].append(pn.replace(' ', ''))
undocumentedControls['parentsIds'].append('root')
undocumentedControls['types'].append('command')
undocumentedControls['paths'].append(pp)
undocumentedControls['separators'].append(False)
undocumentedControls['icons'].append('fusion360')
def getCustomControls():
userDataPath = getUserDataPath()
customPathFile = os.path.join(userDataPath, 'customPaths.json')
if not os.path.exists(customPathFile):
createJsonFiles(customPathFile)
else:
with open(customPathFile, 'r') as file:
customControls = json.load(file)
controls['titles'] = controls['titles'][0:-1] + customControls['titles'] + [controls['titles'][-1]]
controls['ids'] = controls['ids'][0:-1] + customControls['ids'] + [controls['ids'][-1]]
controls['parentsIds'] = controls['parentsIds'][0:-1] + customControls['parentsIds'] + [controls['parentsIds'][-1]]
controls['types'] = controls['types'][0:-1] + customControls['types'] + [controls['types'][-1]]
controls['paths'] = controls['paths'][0:-1] + customControls['paths'] + [controls['paths'][-1]]
controls['separators'] = controls['separators'][0:-1] + customControls['separators'] + [controls['separators'][-1]]
controls['icons'] = controls['icons'][0:-1] + customControls['icons'] + [controls['icons'][-1]]
def checkResources():
global controls
for i in range(0, len(controls['icons'])):
if controls['icons'][i] != '':
resourcePath = os.path.join(thisFilePath, 'resources', controls['icons'][i])
if os.path.exists(resourcePath):
controls['icons'][i] = 'resources/' + controls['icons'][i]
else:
controls['icons'][i] = ''
class commandCreatedEventHandler(adsk.core.CommandCreatedEventHandler):
def __init__(self):
super().__init__()
def notify(self, args):
try:
senderId = args.firingEvent.sender.id[len(thisAddinName):]
if senderId in controls['ids']:
idx = controls['ids'].index(senderId)
if controls['paths'][idx]:
path = os.path.realpath(controls['paths'][idx])
elif senderId in undocumentedControls['ids']:
idx = undocumentedControls['ids'].index(senderId)
if undocumentedControls['paths'][idx]:
path = os.path.realpath(undocumentedControls['paths'][idx])
else:
ui.messageBox('Control not in list', '{} v{}'.format(thisAddinTitle, thisAddinVersion), adsk.core.MessageBoxButtonTypes.OKButtonType, adsk.core.MessageBoxIconTypes.CriticalIconType)
path = None
if path:
if os.path.exists(path):
if platform.system() == 'Windows':
os.startfile(path)
else:
subprocess.check_call(["open", "--", path])
else:
ui.messageBox('Path not found: ' + path, '{} v{}'.format(thisAddinTitle, thisAddinVersion), adsk.core.MessageBoxButtonTypes.OKButtonType, adsk.core.MessageBoxIconTypes.CriticalIconType)
except:
if ui:
ui.messageBox('Failed:\n{}'.format(traceback.format_exc()), '{} v{}'.format(thisAddinTitle, thisAddinVersion), adsk.core.MessageBoxButtonTypes.OKButtonType, adsk.core.MessageBoxIconTypes.CriticalIconType)
def getUserDataPath():
if platform.system() == 'Windows':
dataPath = os.path.join(os.getenv('APPDATA'), thisAddinName + 'ForFusion360')
else:
dataPath = os.path.join(os.path.expanduser('~'), 'Library', 'Application Support', thisAddinName + 'ForFusion360')
if not os.path.exists(dataPath):
os.mkdir(dataPath)
userDataPath = os.path.join(dataPath, app.userId)
if not os.path.exists(userDataPath):
os.mkdir(userDataPath)
return userDataPath
def createJsonFiles(customPathFile):
emptyControls = {
'titles': [],
'ids': [],
'parentsIds': [],
'types': [],
'paths': [],
'separators': [],
'icons': []
}
with open(customPathFile, 'w') as f:
json.dump(emptyControls, f, indent=2)
def cleanUI():
solidScripts = ui.allToolbarPanels.itemById('SolidScriptsAddinsPanel')
cntrls = solidScripts.controls
separator = cntrls.itemById(thisAddinName + 'separator')
if separator:
separator.deleteMe()
cmdDefs = ui.commandDefinitions
for i in range(0, len(controls['titles'])):
cmdDef = cmdDefs.itemById(thisAddinName + controls['ids'][i])
if cmdDef:
cmdDef.deleteMe()
if not nestedMenuBugFixed:
for i in range(0, len(undocumentedControls['titles'])):
cmdDef = cmdDefs.itemById(thisAddinName + undocumentedControls['ids'][i])
if cmdDef:
cmdDef.deleteMe()
dropdownCntr = cntrls.itemById(thisAddinName + 'root' + 'Dropdown')
if dropdownCntr:
for i in range(0, len(controls['titles'])):
cntrl = dropdownCntr.controls.itemById(thisAddinName + controls['ids'][i])
if cntrl:
cntrl.isPromoted = False
cntrl.deleteMe()
if controls['separators'][i]:
cntrl = dropdownCntr.controls.itemById(thisAddinName + controls['ids'][i] + 'separator')
if cntrl:
cntrl.isPromoted = False
cntrl.deleteMe()
dropdownCntr.deleteMe()
if not nestedMenuBugFixed:
dropdownCntr = cntrls.itemById(thisAddinName + 'root' + 'Dropdown' + 'Undoc')
if dropdownCntr:
for i in range(0, len(undocumentedControls['titles'])):
cntrl = dropdownCntr.controls.itemById(thisAddinName + undocumentedControls['ids'][i])
if cntrl:
cntrl.isPromoted = False
cntrl.deleteMe()
if undocumentedControls['separators'][i]:
cntrl = dropdownCntr.controls.itemById(thisAddinName + undocumentedControls['ids'][i] + 'separator')
if cntrl:
cntrl.isPromoted = False
cntrl.deleteMe()
dropdownCntr.deleteMe()
def run(context):
try:
getDefaultControls()
if nestedMenuBugFixed and showUndocumentedFolders:
getUndocumentedControls()
getCustomControls()
cmdDefs = ui.commandDefinitions
commandCreated = commandCreatedEventHandler()
solidScripts = ui.allToolbarPanels.itemById('SolidScriptsAddinsPanel')
solidScripts.controls.addSeparator(thisAddinName + 'separator', '')
solidScripts.controls.addDropDown(thisAddinTitle, '', thisAddinName + 'root' + 'Dropdown', '', False)
        # Resolve icon resource paths (same logic as checkResources).
        checkResources()
for i in range(0, len(controls['titles'])):
if controls['types'][i] == 'command':
button = cmdDefs.addButtonDefinition(thisAddinName + controls['ids'][i], controls['titles'][i], controls['paths'][i], controls['icons'][i])
button.commandCreated.add(commandCreated)
handlers.append(commandCreated)
if controls['parentsIds'][i] == 'root':
dropdown = solidScripts.controls.itemById(thisAddinName + 'root' + 'Dropdown')
else:
rootDropdown = solidScripts.controls.itemById(thisAddinName + 'root' + 'Dropdown')
dropdown = rootDropdown.controls.itemById(thisAddinName + controls['parentsIds'][i])
dropdown.controls.addCommand(button)
else:
dropdown = solidScripts.controls.itemById(thisAddinName + 'root' + 'Dropdown')
dropdown.controls.addDropDown(controls['titles'][i], controls['icons'][i], thisAddinName + controls['ids'][i], '', False)
if controls['separators'][i]:
dropdown.controls.addSeparator(thisAddinName + controls['ids'][i] + 'separator', '')
if not nestedMenuBugFixed and showUndocumentedFolders:
getUndocumentedControls()
for i in range(0, len(undocumentedControls['icons'])):
if undocumentedControls['icons'][i] != '':
resourcePath = os.path.join(thisFilePath, 'resources', undocumentedControls['icons'][i])
if os.path.exists(resourcePath):
undocumentedControls['icons'][i] = 'resources/' + undocumentedControls['icons'][i]
else:
undocumentedControls['icons'][i] = ''
solidScripts.controls.addDropDown(thisAddinTitle + ' (undocumented)', '', thisAddinName + 'root' + 'Dropdown' + 'Undoc', '', False)
for i in range(0, len(undocumentedControls['titles'])):
if undocumentedControls['types'][i] == 'command':
button = cmdDefs.addButtonDefinition(thisAddinName + undocumentedControls['ids'][i], undocumentedControls['titles'][i], undocumentedControls['paths'][i], undocumentedControls['icons'][i])
button.commandCreated.add(commandCreated)
handlers.append(commandCreated)
if undocumentedControls['parentsIds'][i] == 'root':
dropdown = solidScripts.controls.itemById(thisAddinName + 'root' + 'Dropdown' + 'Undoc')
else:
rootDropdown = solidScripts.controls.itemById(thisAddinName + 'root' + 'Dropdown' + 'Undoc')
dropdown = rootDropdown.controls.itemById(thisAddinName + undocumentedControls['parentsIds'][i])
dropdown.controls.addCommand(button)
else:
dropdown = solidScripts.controls.itemById(thisAddinName + 'root' + 'Dropdown' + 'Undoc')
dropdown.controls.addDropDown(undocumentedControls['titles'][i], undocumentedControls['icons'][i], thisAddinName + undocumentedControls['ids'][i], '', False)
if undocumentedControls['separators'][i]:
dropdown.controls.addSeparator(thisAddinName + undocumentedControls['ids'][i] + 'separator', '')
if context['IsApplicationStartup'] is False:
ui.messageBox("The '{}' command has been added\nto the ADD-INS panel of the DESIGN workspace.".format(thisAddinTitle), '{} v{}'.format(thisAddinTitle, thisAddinVersion))
except:
if ui:
cleanUI()
ui.messageBox('Failed:\n{}'.format(traceback.format_exc()), '{} v{}'.format(thisAddinTitle, thisAddinVersion))
def stop(context):
try:
cleanUI()
except:
if ui:
ui.messageBox('Failed:\n{}'.format(traceback.format_exc()), '{} v{}'.format(thisAddinTitle, thisAddinVersion))
```
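The platform-specific branch the add-in uses to open a folder (os.startfile on Windows, the `open` command on macOS) can be summarized in a small standalone helper; this is an illustrative sketch, not part of the add-in:
```python
import os
import platform
import subprocess

def open_folder(path: str) -> None:
    """Open a folder in the system file browser (Explorer on Windows, Finder on macOS)."""
    path = os.path.realpath(path)
    if not os.path.exists(path):
        raise FileNotFoundError(path)
    if platform.system() == 'Windows':
        os.startfile(path)
    else:
        subprocess.check_call(["open", "--", path])

# Example: open the user's home directory.
# open_folder(os.path.expanduser('~'))
```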
|
{
"source": "Jerome-Celle/Blitz-API",
"score": 3
}
|
#### File: blitz_api/tests/tests_model_Domain.py
```python
from django.db import IntegrityError, transaction
from rest_framework.test import APITestCase
from ..models import Domain, Organization
class DomainTests(APITestCase):
@classmethod
def setUpClass(cls):
super(DomainTests, cls).setUpClass()
cls.org = Organization.objects.create(name="random_university")
def test_create(self):
"""
Ensure that we can create a domain with a valid organization.
"""
domain = Domain.objects.create(
name="random_domain",
organization_id=self.org.id
)
self.assertEqual(domain.__str__(), "random_domain")
```
#### File: blitz_api/tests/tests_view_UsersExport.py
```python
import json
import re
from rest_framework import status
from rest_framework.response import Response
from rest_framework.test import APIClient, APITestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.conf import settings
from xlrd import open_workbook
from xlrd.sheet import Sheet
from ..factories import UserFactory, AdminFactory
User = get_user_model()
class UsersTests(APITestCase):
@classmethod
def setUpClass(cls):
super(UsersTests, cls).setUpClass()
cls.client = APIClient()
cls.client_authenticate = APIClient()
cls.export_url = reverse('user-export')
cls.regex_file_name = f'({settings.MEDIA_ROOT}.*\\.xls)'
def setUp(self):
self.user = UserFactory()
self.user.set_password('<PASSWORD>!')
self.user.save()
self.admin = AdminFactory()
self.admin.set_password('<PASSWORD>!')
self.admin.save()
self.client_authenticate.force_authenticate(user=self.admin)
self.nb_setup_user = 2
def test_export_content(self):
response: Response = self.client_authenticate.get(
self.export_url
)
self.assertEqual(
response.status_code,
status.HTTP_200_OK,
response.content
)
export_response = json.loads(response.content)
self.assertEqual(
export_response['count'],
self.nb_setup_user,
"Count value of export is different than expected"
)
self.assertEqual(
export_response['limit'],
1000,
)
self.assertIn(settings.MEDIA_ROOT,
export_response['file_url'],
export_response['file_url'])
file_path = re.findall(self.regex_file_name,
export_response['file_url'])[0]
wb = open_workbook(file_path)
first_sheet: Sheet = wb.sheets()[0]
col_infos = []
for col_number in range(first_sheet.ncols):
try:
col_infos.append({
'col_number': col_number,
'col_name': first_sheet.cell(0, col_number).value
})
except Exception:
pass
users = []
for row in range(1, first_sheet.nrows):
user_data = dict()
try:
for col_info in col_infos:
user_info = first_sheet.cell(
row,
col_info['col_number']).value
user_data[col_info['col_name']] = user_info
except Exception:
pass
users.append(user_data)
user_0_id = users[0]['id']
user_0 = User.objects.get(id=user_0_id)
self.assertEqual(
users[0]['first_name'],
user_0.first_name,
users[0]
)
```
#### File: retirement/tests/tests_model_Reservation.py
```python
from datetime import datetime, timedelta
import pytz
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.utils import timezone
from rest_framework.test import APITestCase
from blitz_api.factories import UserFactory
from store.models import Order, OrderLine
from ..models import Reservation, Retirement
LOCAL_TIMEZONE = pytz.timezone(settings.TIME_ZONE)
class ReservationTests(APITestCase):
@classmethod
def setUpClass(cls):
super(ReservationTests, cls).setUpClass()
cls.user = UserFactory()
cls.retirement_type = ContentType.objects.get_for_model(Retirement)
cls.retirement = Retirement.objects.create(
name="random_retirement",
details="This is a description of the retirement.",
seats=40,
address_line1="123 random street",
postal_code="123 456",
state_province="Random state",
country="Random country",
price=3,
start_time=LOCAL_TIMEZONE.localize(datetime(2130, 1, 15, 8)),
end_time=LOCAL_TIMEZONE.localize(datetime(2130, 1, 17, 12)),
min_day_refund=7,
min_day_exchange=7,
refund_rate=100,
is_active=True,
accessibility=True,
form_url="example.com",
carpool_url='example2.com',
review_url='example3.com',
has_shared_rooms=True
)
cls.order = Order.objects.create(
user=cls.user,
transaction_date=timezone.now(),
authorization_id=1,
settlement_id=1,
)
cls.order_line = OrderLine.objects.create(
order=cls.order,
quantity=999,
content_type=cls.retirement_type,
object_id=1,
)
def test_create(self):
"""
        Ensure that we can create a reservation.
"""
reservation = Reservation.objects.create(
user=self.user,
retirement=self.retirement,
order_line=self.order_line,
is_active=True,
)
self.assertEqual(str(reservation), str(self.user))
```
#### File: retirement/tests/tests_viewset_WaitQueue.py
```python
import json
from datetime import datetime, timedelta
import pytz
from django.conf import settings
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.utils import timezone
from rest_framework import status
from rest_framework.test import APIClient, APITestCase
from blitz_api.factories import AdminFactory, UserFactory
from ..models import Retirement, WaitQueue
User = get_user_model()
LOCAL_TIMEZONE = pytz.timezone(settings.TIME_ZONE)
class WaitQueueTests(APITestCase):
@classmethod
def setUpClass(cls):
super(WaitQueueTests, cls).setUpClass()
cls.client = APIClient()
cls.user = UserFactory()
cls.user2 = UserFactory()
cls.admin = AdminFactory()
def setUp(self):
self.retirement = Retirement.objects.create(
name="mega_retirement",
details="This is a description of the mega retirement.",
seats=400,
address_line1="123 random street",
postal_code="123 456",
state_province="Random state",
country="Random country",
price=199,
start_time=LOCAL_TIMEZONE.localize(datetime(2130, 1, 15, 8)),
end_time=LOCAL_TIMEZONE.localize(datetime(2130, 1, 17, 12)),
min_day_refund=7,
min_day_exchange=7,
refund_rate=50,
is_active=True,
activity_language='FR',
next_user_notified=3,
accessibility=True,
form_url="example.com",
carpool_url='example2.com',
review_url='example3.com',
has_shared_rooms=True,
)
self.wait_queue_subscription = WaitQueue.objects.create(
user=self.user2,
retirement=self.retirement,
)
def test_create(self):
"""
Ensure we can subscribe a user to a retirement wait_queue.
"""
self.client.force_authenticate(user=self.user)
data = {
'retirement': reverse(
'retirement:retirement-detail', args=[self.retirement.id]
),
# The 'user' field is ignored when the calling user is not admin.
# The field is REQUIRED nonetheless.
'user': reverse('user-detail', args=[self.admin.id]),
}
response = self.client.post(
reverse('retirement:waitqueue-list'),
data,
format='json',
)
self.assertEqual(
response.status_code,
status.HTTP_201_CREATED,
response.content,
)
content = {
'list_size': 2,
'retirement': 'http://testserver/retirement/retirements/' +
str(self.retirement.id),
'user': ''.join(['http://testserver/users/', str(self.user.id)]),
'created_at': json.loads(response.content)['created_at'],
}
response_data = json.loads(response.content)
del response_data['id']
del response_data['url']
self.assertEqual(
response_data,
content
)
def test_create_as_admin_for_user(self):
"""
Ensure we can subscribe another user to a retirement wait_queue as
an admin user.
"""
self.client.force_authenticate(user=self.admin)
data = {
'retirement': reverse(
'retirement:retirement-detail', args=[self.retirement.id]
),
'user': reverse('user-detail', args=[self.user.id]),
}
response = self.client.post(
reverse('retirement:waitqueue-list'),
data,
format='json',
)
self.assertEqual(
response.status_code,
status.HTTP_201_CREATED,
response.content,
)
content = {
'list_size': 2,
'retirement': 'http://testserver/retirement/retirements/' +
str(self.retirement.id),
'user': ''.join(['http://testserver/users/', str(self.user.id)]),
}
response_data = json.loads(response.content)
del response_data['id']
del response_data['url']
del response_data['created_at']
self.assertEqual(
response_data,
content
)
def test_create_not_authenticated(self):
"""
        Ensure we can't subscribe to a retirement waitqueue if the user is not
        authenticated.
"""
data = {
'retirement': reverse(
'retirement:retirement-detail', args=[self.retirement.id]
),
'user': reverse('user-detail', args=[self.user.id]),
}
response = self.client.post(
reverse('retirement:waitqueue-list'),
data,
format='json',
)
content = {
'detail': 'Authentication credentials were not provided.'
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_create_duplicate(self):
"""
Ensure we can't subscribe to a retirement waitqueue twice.
"""
self.client.force_authenticate(user=self.admin)
data = {
'retirement': reverse(
'retirement:retirement-detail', args=[self.retirement.id]
),
'user': reverse('user-detail', args=[self.user2.id]),
}
response = self.client.post(
reverse('retirement:waitqueue-list'),
data,
format='json',
)
content = {
"non_field_errors": [
"The fields user, retirement must make a unique set."
]
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_missing_field(self):
"""
        Ensure we can't subscribe to a retirement waitqueue when required fields
        are missing.
"""
self.client.force_authenticate(user=self.admin)
data = {}
response = self.client.post(
reverse('retirement:waitqueue-list'),
data,
format='json',
)
content = {
"retirement": ["This field is required."],
"user": ["This field is required."]
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_invalid_field(self):
"""
Ensure we can't subscribe to a retirement waitqueue with invalid
fields.
"""
self.client.force_authenticate(user=self.admin)
data = {
'retirement': (1,),
'user': "http://testserver/invalid/999"
}
response = self.client.post(
reverse('retirement:waitqueue-list'),
data,
format='json',
)
content = {
'retirement': [
'Incorrect type. Expected URL string, received list.'
],
'user': ['Invalid hyperlink - No URL match.']
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_update(self):
"""
Ensure we can't update a subscription to a retirement waitqueue.
"""
self.client.force_authenticate(user=self.admin)
data = {
'retirement': reverse(
'retirement:retirement-detail', args=[self.retirement.id]
),
'user': reverse('user-detail', args=[self.user2.id]),
}
response = self.client.put(
reverse(
'retirement:waitqueue-detail',
kwargs={'pk': 1},
),
data,
format='json',
)
self.assertEqual(
response.status_code,
status.HTTP_405_METHOD_NOT_ALLOWED
)
def test_partial_update(self):
"""
        Ensure we can't partially update a subscription to a retirement waitqueue.
"""
self.client.force_authenticate(user=self.admin)
data = {
'retirement': reverse(
'retirement:retirement-detail', args=[self.retirement.id]
),
'user': reverse('user-detail', args=[self.user2.id]),
}
response = self.client.put(
reverse(
'retirement:waitqueue-detail',
kwargs={'pk': 1},
),
data,
format='json',
)
self.assertEqual(
response.status_code,
status.HTTP_405_METHOD_NOT_ALLOWED
)
def test_delete(self):
"""
Ensure we can delete a subscription to a retirement waitqueue.
The index determining the next user to be notified should be corrected.
"""
self.client.force_authenticate(user=self.admin)
response = self.client.delete(
reverse(
'retirement:waitqueue-detail',
kwargs={'pk': self.wait_queue_subscription.id},
),
)
self.assertEqual(
response.status_code,
status.HTTP_204_NO_CONTENT,
response.content
)
self.retirement.refresh_from_db()
self.assertEqual(self.retirement.next_user_notified, 2)
def test_list(self):
"""
Ensure we can list subscriptions to retirement waitqueues as an
authenticated user.
"""
self.client.force_authenticate(user=self.user2)
response = self.client.get(
reverse('retirement:waitqueue-list'),
format='json',
)
response_data = json.loads(response.content)
content = {
'count': 1,
'next': None,
'previous': None,
'results': [{
'created_at': response_data['results'][0]['created_at'],
'id': self.wait_queue_subscription.id,
'list_size': 1,
'retirement':
'http://testserver/retirement/retirements/' +
str(self.retirement.id),
'url':
'http://testserver/retirement/wait_queues/' +
str(self.wait_queue_subscription.id),
'user': 'http://testserver/users/' + str(self.user2.id)
}]
}
self.assertEqual(response_data, content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_list_not_authenticated(self):
"""
Ensure we can't list subscriptions to retirement waitqueues as an
unauthenticated user.
"""
response = self.client.get(
reverse('retirement:waitqueue-list'),
format='json',
)
content = {'detail': 'Authentication credentials were not provided.'}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_read(self):
"""
        Ensure we can read a subscription to a retirement as an
        authenticated user.
"""
self.client.force_authenticate(user=self.user2)
response = self.client.get(
reverse(
'retirement:waitqueue-detail',
kwargs={'pk': self.wait_queue_subscription.id},
),
)
content = {
'id': self.wait_queue_subscription.id,
'list_size': 1,
'retirement':
'http://testserver/retirement/retirements/' +
str(self.retirement.id),
'url':
'http://testserver/retirement/wait_queues/' +
str(self.wait_queue_subscription.id),
'user': ''.join(['http://testserver/users/', str(self.user2.id)]),
'created_at': json.loads(response.content)['created_at'],
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_read_not_authenticated(self):
"""
Ensure we can't read a subscription to a retirement waitqueues as an
unauthenticated user.
"""
response = self.client.get(
reverse(
'retirement:waitqueue-detail',
kwargs={'pk': 1},
),
format='json',
)
content = {'detail': 'Authentication credentials were not provided.'}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_read_as_admin(self):
"""
        Ensure we can read a subscription to a retirement as an admin
        user.
"""
self.client.force_authenticate(user=self.admin)
response = self.client.get(
reverse(
'retirement:waitqueue-detail',
kwargs={'pk': self.wait_queue_subscription.id},
),
)
response_data = json.loads(response.content)
content = {
'id': self.wait_queue_subscription.id,
'list_size': 1,
'retirement':
'http://testserver/retirement/retirements/' +
str(self.retirement.id),
'url':
'http://testserver/retirement/wait_queues/' +
str(self.wait_queue_subscription.id),
'user': ''.join(['http://testserver/users/', str(self.user2.id)]),
'created_at': json.loads(response.content)['created_at'],
}
self.assertEqual(response_data, content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_read_non_existent(self):
"""
Ensure we get not found when asking for a subscription to a retirement
that doesn't exist.
"""
self.client.force_authenticate(user=self.admin)
response = self.client.get(
reverse(
'retirement:waitqueue-detail',
kwargs={'pk': 999},
),
)
content = {'detail': 'Not found.'}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
```
#### File: Blitz-API/store/permissions.py
```python
from rest_framework import permissions
class IsAdminOrReadOnly(permissions.BasePermission):
"""
Custom permission to only allow admins to modify objects.
"""
def has_permission(self, request, view):
# Read permissions are allowed to any request,
# so we'll always allow GET, HEAD or OPTIONS requests.
if request.method in permissions.SAFE_METHODS:
return True
return request.user.is_staff
class IsAdminOrCreateReadOnly(permissions.BasePermission):
"""
Custom permission to only allow admins to update/delete objects.
"""
def has_permission(self, request, view):
# Read permissions are allowed to any request,
# so we'll always allow GET, HEAD or OPTIONS requests.
if request.method in permissions.SAFE_METHODS:
return True
# Always allow object creation
if request.method == 'POST':
return True
return request.user.is_staff
class IsOwner(permissions.BasePermission):
"""
Custom permission to only allow admins or owners of an object to view/edit.
"""
def has_object_permission(self, request, view, obj):
        # Test if the object is the user himself, otherwise verify
        # whether an 'owner' or 'user' field exists and equals the requesting user.
        return (request.user.is_staff or
                obj == request.user or
                (hasattr(obj, 'owner') and obj.owner == request.user) or
                (hasattr(obj, 'user') and obj.user == request.user))
```
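As a quick sanity check of the IsOwner object-level check, the permission can be exercised with stand-in objects (illustrative only; requires djangorestframework to be importable, e.g. inside the project's test environment):
```python
from types import SimpleNamespace

from store.permissions import IsOwner

perm = IsOwner()
user = SimpleNamespace(is_staff=False)
request = SimpleNamespace(user=user)

owned = SimpleNamespace(owner=user)        # object owned by the requesting user
foreign = SimpleNamespace(owner=object())  # object owned by someone else

assert perm.has_object_permission(request, None, owned) is True
assert perm.has_object_permission(request, None, foreign) is False
```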
#### File: Blitz-API/store/resources.py
```python
from django.apps import apps
from django.contrib.auth import get_user_model
from import_export import fields, resources
from import_export.widgets import (ForeignKeyWidget, ManyToManyWidget,
DateTimeWidget)
from blitz_api.models import AcademicLevel
from blitz_api.services import get_model_from_name
from .models import (Membership, Order, OrderLine, Package, CustomPayment,
Coupon, CouponUser, Refund, )
User = get_user_model()
# django-import-export models declaration
# These represent the model data that will be imported/exported
class MembershipResource(resources.ModelResource):
academic_levels = fields.Field(
column_name='academic_levels',
attribute='academic_levels',
widget=ManyToManyWidget(AcademicLevel, ',', 'name'),
)
class Meta:
model = Membership
fields = (
'id',
'name',
'details',
'price',
'duration',
'academic_levels',
'available',
)
export_order = (
'id',
'name',
'details',
'price',
'duration',
'academic_levels',
'available',
)
class OrderResource(resources.ModelResource):
user = fields.Field(
column_name='user',
attribute='user',
widget=ForeignKeyWidget(User, 'email'),
)
coupon = fields.Field(
column_name='coupon',
attribute='coupon',
widget=ForeignKeyWidget(Coupon, 'code'),
)
class Meta:
model = Order
fields = (
'id',
'user',
'transaction_date',
'authorization_id',
'settlement_id',
'coupon',
)
export_order = (
'id',
'user',
'transaction_date',
'authorization_id',
'settlement_id',
'coupon',
)
class OrderLineResource(resources.ModelResource):
user = fields.Field(
column_name='user',
attribute='order__user',
widget=ForeignKeyWidget(User, 'email'),
)
item_type = fields.Field(
column_name='item_type',
attribute='content_type__model',
)
item_name = fields.Field()
item_id = fields.Field()
def dehydrate_item_name(self, orderline):
model = get_model_from_name(orderline.content_type.model)
return model.objects.get(id=orderline.object_id).name
def dehydrate_item_id(self, orderline):
model = get_model_from_name(orderline.content_type.model)
return model.objects.get(id=orderline.object_id).id
class Meta:
model = OrderLine
fields = (
'id',
'user',
'item_type',
'item_name',
'item_id',
'quantity',
'order',
)
export_order = (
'id',
'user',
'item_type',
'item_name',
'item_id',
'quantity',
'order',
)
class PackageResource(resources.ModelResource):
memberships = fields.Field(
column_name='memberships',
attribute='exclusive_memberships',
widget=ManyToManyWidget(Membership, ',', 'name'),
)
class Meta:
model = Package
fields = (
'id',
'name',
'details',
'price',
'reservations',
'memberships',
'available',
)
export_order = (
'id',
'name',
'details',
'price',
'reservations',
'memberships',
'available',
)
class CustomPaymentResource(resources.ModelResource):
user = fields.Field(
column_name='user',
attribute='user',
widget=ForeignKeyWidget(User, 'email'),
)
class Meta:
model = CustomPayment
fields = (
'id',
'name',
'details',
'price',
'user',
'transaction_date',
'authorization_id',
'settlement_id',
)
export_order = (
'id',
'name',
'details',
'price',
'user',
'transaction_date',
'authorization_id',
'settlement_id',
)
class CouponResource(resources.ModelResource):
owner = fields.Field(
column_name='owner',
attribute='owner',
widget=ForeignKeyWidget(User, 'email'),
)
total_use = fields.Field()
def dehydrate_total_use(self, coupon):
uses = CouponUser.objects.filter(coupon=coupon)
return sum(uses.values_list('uses', flat=True))
class Meta:
model = Coupon
fields = (
'id',
'details',
'value',
'percent_off',
'code',
'owner',
'start_time',
'end_time',
'total_use',
)
export_order = (
'id',
'details',
'value',
'percent_off',
'code',
'owner',
'start_time',
'end_time',
'total_use',
)
class CouponUserResource(resources.ModelResource):
user_email = fields.Field(
column_name='user_email',
attribute='user',
widget=ForeignKeyWidget(User, 'email'),
)
user_firstname = fields.Field(
column_name='user_firstname',
attribute='user',
widget=ForeignKeyWidget(User, 'first_name'),
)
user_lastname = fields.Field(
column_name='user_lastname',
attribute='user',
widget=ForeignKeyWidget(User, 'last_name'),
)
student_number = fields.Field(
column_name='student_number',
attribute='user',
widget=ForeignKeyWidget(User, 'student_number'),
)
academic_program_code = fields.Field(
column_name='academic_program_code',
attribute='user',
widget=ForeignKeyWidget(User, 'academic_program_code'),
)
university = fields.Field(
column_name='university',
attribute='user',
widget=ForeignKeyWidget(User, 'university__name'),
)
class Meta:
model = CouponUser
fields = (
'user_email',
'university',
'user_firstname',
'user_lastname',
'student_number',
'academic_program_code',
'uses',
)
export_order = (
'user_email',
'university',
'user_firstname',
'user_lastname',
'student_number',
'academic_program_code',
'uses',
)
class RefundResource(resources.ModelResource):
orderline = fields.Field(
column_name='orderline',
attribute='orderline',
widget=ForeignKeyWidget(OrderLine, 'content_type__model'),
)
product_name = fields.Field(
column_name='product_name',
attribute='orderline',
widget=ForeignKeyWidget(OrderLine, 'content_object__name'),
)
class Meta:
model = Refund
fields = (
'id',
'orderline',
'product_name',
'amount',
'details',
'refund_date',
)
export_order = (
'id',
'orderline',
'product_name',
'amount',
'details',
'refund_date',
)
```
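For reference, a django-import-export resource such as the ones above is typically used like this (illustrative only; assumes the project's Django environment and the models shown above):
```python
from store.resources import MembershipResource

# Export every Membership row into an in-memory tablib dataset.
dataset = MembershipResource().export()
print(dataset.headers)  # the columns declared in Meta.fields

# Render the dataset in any format tablib supports (csv, json, xlsx with extras, ...).
with open('memberships.csv', 'w') as f:
    f.write(dataset.csv)
```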
#### File: store/tests/tests_model_Refund.py
```python
from datetime import timedelta
from django.utils import timezone
from django.contrib.contenttypes.models import ContentType
from rest_framework.test import APITestCase
from blitz_api.factories import UserFactory
from ..models import Membership, Order, OrderLine, Refund
class RefundTests(APITestCase):
@classmethod
def setUpClass(cls):
super(RefundTests, cls).setUpClass()
cls.membership_type = ContentType.objects.get_for_model(Membership)
cls.membership = Membership.objects.create(
name="basic_membership",
details="1-Year student membership",
available=True,
price=50,
duration=timedelta(days=365),
)
cls.user = UserFactory()
cls.order = Order.objects.create(
user=cls.user,
transaction_date=timezone.now(),
authorization_id=1,
settlement_id=1,
)
cls.orderline = OrderLine.objects.create(
order=cls.order,
quantity=999,
content_type=cls.membership_type,
object_id=cls.membership.id,
)
def test_create(self):
"""
        Ensure that we can create a refund.
"""
refund = Refund.objects.create(
orderline=self.orderline,
refund_date=timezone.now(),
amount=10.00,
details="Refund details",
)
self.assertEqual(str(refund), 'basic_membership, qt:999, 10.0$')
```
#### File: Blitz-API/workplace/fields.py
```python
import pytz
from rest_framework import serializers
from django.utils.translation import ugettext_lazy as _
class TimezoneField(serializers.CharField):
def to_internal_value(self, value):
tz = super().to_representation(value)
try:
return str(pytz.timezone(tz))
except pytz.exceptions.UnknownTimeZoneError:
raise serializers.ValidationError(_("Unknown timezone"))
```
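A small sketch of how a custom field like TimezoneField is usually wired into a serializer (illustrative only; the serializer name is a placeholder and a configured Django environment is assumed):
```python
from rest_framework import serializers

from workplace.fields import TimezoneField

class ExampleSerializer(serializers.Serializer):
    # Placeholder serializer: only the custom field wiring matters here.
    timezone = TimezoneField()

good = ExampleSerializer(data={'timezone': 'America/Montreal'})
assert good.is_valid(), good.errors
print(good.validated_data['timezone'])  # 'America/Montreal'

bad = ExampleSerializer(data={'timezone': 'Not/AZone'})
assert not bad.is_valid()  # rejected with the "Unknown timezone" validation error
```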
#### File: workplace/tests/tests_viewset_Period.py
```python
import json
import pytz
from datetime import datetime, timedelta
from rest_framework import status
from rest_framework.test import APIClient, APITestCase
from django.urls import reverse
from django.utils import timezone
from django.conf import settings
from django.core import mail
from django.contrib.auth import get_user_model
from django.test.utils import override_settings
from blitz_api.factories import UserFactory, AdminFactory
from blitz_api.services import remove_translation_fields
from ..models import Workplace, Period, TimeSlot, Reservation
User = get_user_model()
LOCAL_TIMEZONE = pytz.timezone(settings.TIME_ZONE)
class PeriodTests(APITestCase):
@classmethod
def setUpClass(cls):
super(PeriodTests, cls).setUpClass()
cls.client = APIClient()
cls.user = UserFactory()
cls.admin = AdminFactory()
cls.workplace = Workplace.objects.create(
name="Blitz",
seats=40,
details="short_description",
address_line1="123 random street",
postal_code="123 456",
state_province="Random state",
country="Random country",
)
cls.period = Period.objects.create(
name="random_period",
workplace=cls.workplace,
start_date=timezone.now(),
end_date=timezone.now() + timedelta(weeks=4),
price=3,
is_active=False,
)
cls.period_active = Period.objects.create(
name="random_period_active",
workplace=cls.workplace,
start_date=timezone.now(),
end_date=timezone.now() + timedelta(weeks=4),
price=3,
is_active=True,
)
cls.time_slot_active = TimeSlot.objects.create(
name="evening_time_slot_active",
period=cls.period_active,
price=3,
start_time=LOCAL_TIMEZONE.localize(datetime(2130, 1, 15, 18)),
end_time=LOCAL_TIMEZONE.localize(datetime(2130, 1, 15, 22)),
)
cls.reservation = Reservation.objects.create(
user=cls.user,
timeslot=cls.time_slot_active,
is_active=True,
)
def test_create(self):
"""
Ensure we can create a period if user has permission.
"""
self.client.force_authenticate(user=self.admin)
data = {
'name': "random_period",
'workplace': reverse('workplace-detail', args=[self.workplace.id]),
'start_date': LOCAL_TIMEZONE.localize(
datetime.now() +
timedelta(weeks=5)),
'end_date': LOCAL_TIMEZONE.localize(
datetime.now() + timedelta(weeks=10)),
'price': '3.00',
'is_active': True,
}
response = self.client.post(
reverse('period-list'),
data,
format='json',
)
content = {
'end_date': data['end_date'].isoformat(),
'is_active': True,
'name': 'random_period',
'price': '3.00',
'total_reservations': 0,
'start_date': data['start_date'].isoformat(),
'workplace': f'http://testserver/workplaces/{self.workplace.id}'
}
response_content = json.loads(response.content)
del response_content['id']
del response_content['url']
self.assertEqual(
remove_translation_fields(response_content),
content
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_without_permission(self):
"""
Ensure we can't create a period if user has no permission.
"""
self.client.force_authenticate(user=self.user)
data = {
'name': "random_period",
'workplace': reverse('workplace-detail', args=[self.workplace.id]),
'start_date': timezone.now(),
'end_date': timezone.now() + timedelta(weeks=4),
'price': '3.00',
'is_active': True,
}
response = self.client.post(
reverse('period-list'),
data,
format='json',
)
content = {
'detail': 'You do not have permission to perform this action.'
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_create_overlapping(self):
"""
Ensure we can't create overlapping period in the same workplace.
"""
self.client.force_authenticate(user=self.admin)
data = {
'name': "random_period",
'workplace': reverse('workplace-detail', args=[self.workplace.id]),
'start_date': timezone.now(),
'end_date': timezone.now() + timedelta(weeks=4),
'price': '3.00',
'is_active': True,
}
response = self.client.post(
reverse('period-list'),
data,
format='json',
)
content = {
'non_field_errors': [
'An active period associated to the same workplace overlaps '
'with the provided start_date and end_date.'
]
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_invalid_start_end(self):
"""
Ensure we can't create periods with start_date greater than end_date.
"""
self.client.force_authenticate(user=self.admin)
data = {
'name': "random_period",
'workplace': reverse('workplace-detail', args=[self.workplace.id]),
'start_date': timezone.now(),
'end_date': timezone.now() - timedelta(weeks=4),
'price': '3.00',
'is_active': True,
}
response = self.client.post(
reverse('period-list'),
data,
format='json',
)
content = {
'end_date': ['End date must be later than start_date.'],
'start_date': ['Start date must be earlier than end_date.']
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_non_existent_workplace(self):
"""
Ensure we can't create a period with a non-existent workplace.
"""
self.client.force_authenticate(user=self.admin)
data = {
'name': "random_period",
'workplace': reverse('workplace-detail', args=[999]),
'start_date': timezone.now(),
'end_date': timezone.now() + timedelta(weeks=4),
'price': '3.00',
'is_active': True,
}
response = self.client.post(
reverse('period-list'),
data,
format='json',
)
content = {'workplace': ['Invalid hyperlink - Object does not exist.']}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_missing_field(self):
"""
        Ensure we can't create a period when required fields are missing.
"""
self.client.force_authenticate(user=self.admin)
data = {}
response = self.client.post(
reverse('period-list'),
data,
format='json',
)
content = {
'end_date': ['This field is required.'],
'is_active': ['This field is required.'],
'price': ['This field is required.'],
'start_date': ['This field is required.'],
'workplace': ['This field is required.']
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_blank_field(self):
"""
        Ensure we can't create a period when required fields are blank.
"""
self.client.force_authenticate(user=self.admin)
data = {
'name': None,
'workplace': None,
'start_date': None,
'end_date': None,
'price': None,
'is_active': None,
}
response = self.client.post(
reverse('period-list'),
data,
format='json',
)
content = {
'name': ['This field may not be null.'],
'start_date': ['This field may not be null.'],
'end_date': ['This field may not be null.'],
'price': ['This field may not be null.'],
'is_active': ['This field may not be null.']
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_invalid_field(self):
"""
        Ensure we can't create a period when required fields are invalid.
"""
self.client.force_authenticate(user=self.admin)
data = {
'name': "",
'workplace': "invalid",
'start_date': "",
'end_date': "",
'price': "",
'is_active': "",
}
response = self.client.post(
reverse('period-list'),
data,
format='json',
)
content = {
'end_date': [
'Datetime has wrong format. Use one of these formats instead: '
'YYYY-MM-DDThh:mm[:ss[.uuuuuu]][+HH:MM|-HH:MM|Z].'
],
'is_active': ['Must be a valid boolean.'],
'name': ['This field may not be blank.'],
'price': ['A valid number is required.'],
'start_date': [
'Datetime has wrong format. Use one of these formats instead: '
'YYYY-MM-DDThh:mm[:ss[.uuuuuu]][+HH:MM|-HH:MM|Z].'
],
'workplace': ['Invalid hyperlink - No URL match.']
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
"""
Full updates and partial updates are limited. If reservations exist, these
actions are forbidden.
In a future iteration, we could allow updates with the exception of:
- Postpone start_date
- Bring forward end_date
- Set is_active to False
"""
def test_update(self):
"""
Ensure we can update a period without reservations.
"""
self.client.force_authenticate(user=self.admin)
data = {
'name': "new_period",
'workplace': reverse('workplace-detail', args=[self.workplace.id]),
'start_date': LOCAL_TIMEZONE.localize(
datetime.now() + timedelta(weeks=5)),
'end_date': LOCAL_TIMEZONE.localize(
datetime.now() + timedelta(weeks=10)),
'price': '3.00',
'is_active': True,
}
response = self.client.put(
reverse(
'period-detail',
args=[self.period.id]
),
data,
format='json',
)
content = {
'id': self.period.id,
'end_date': data['end_date'].isoformat(),
'is_active': True,
'name': 'new_period',
'price': '3.00',
'total_reservations': 0,
'start_date': data['start_date'].isoformat(),
'url': f'http://testserver/periods/{self.period.id}',
'workplace': f'http://testserver/workplaces/{self.workplace.id}'
}
self.assertEqual(
remove_translation_fields(json.loads(response.content)),
content
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_update_with_reservations(self):
"""
Ensure we can't update a period that contains time slots with
reservations.
"""
self.client.force_authenticate(user=self.admin)
data = {
'name': "new_period",
'workplace': reverse('workplace-detail', args=[self.workplace.id]),
'start_date': timezone.now() + timedelta(weeks=5),
'end_date': timezone.now() + timedelta(weeks=10),
'price': '3.00',
'is_active': True,
}
response = self.client.put(
reverse(
'period-detail',
args=[self.period_active.id]
),
data,
format='json',
)
content = {
'non_field_errors': [
"The period contains timeslots with user reservations."
]
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_update_partial(self):
"""
Ensure we can partially update a period.
"""
self.client.force_authenticate(user=self.admin)
data = {
'name': "updated_period",
'start_date': LOCAL_TIMEZONE.localize(
datetime.now() + timedelta(weeks=1)),
'price': '2000.00',
}
response = self.client.patch(
reverse(
'period-detail',
args=[self.period.id]
),
data,
format='json',
)
response_data = json.loads(response.content)
content = {
'id': self.period.id,
'is_active': False,
'name': 'updated_period',
'price': '2000.00',
'total_reservations': 0,
'end_date': response_data['end_date'],
'start_date': data['start_date'].isoformat(),
'url': f'http://testserver/periods/{self.period.id}',
'workplace': f'http://testserver/workplaces/{self.workplace.id}'
}
self.assertEqual(
remove_translation_fields(json.loads(response.content)),
content
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_update_partial_with_reservations(self):
"""
Ensure we can't partially update a period that contains time slots with
reservations.
The next step is to allow only these actions:
- The start_date can be set to an earlier date.
- The end_date can be set to a later date.
- The is_active field can be set to True.
- The name can change.
"""
self.client.force_authenticate(user=self.admin)
data = {
'name': "updated_period",
'start_date': timezone.now() + timedelta(weeks=1),
'price': '2000.00',
}
response = self.client.patch(
reverse(
'period-detail',
args=[self.period_active.id]
),
data,
format='json',
)
content = {
'non_field_errors': [
"The period contains timeslots with user reservations."
]
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_update_partial_overlapping(self):
"""
Ensure we can't partially update an active period if it overlaps with
another active period.
"""
self.client.force_authenticate(user=self.admin)
data = {
'name': "updated_period",
'start_date': timezone.now() + timedelta(weeks=1),
'price': '2000.00',
'is_active': True,
}
response = self.client.patch(
reverse(
'period-detail',
args=[self.period.id]
),
data,
format='json',
)
content = {
'non_field_errors': [
'An active period associated to the same workplace overlaps '
'with the provided start_date and end_date.'
]
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_delete(self):
"""
Ensure we can delete a period that has no reservations.
"""
self.client.force_authenticate(user=self.admin)
response = self.client.delete(
reverse(
'period-detail',
args=[self.period.id]
),
)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_delete_with_reservations(self):
"""
Ensure we can delete a period that has reservations.
"""
self.client.force_authenticate(user=self.admin)
reservation_2 = Reservation.objects.create(
user=self.user,
timeslot=self.time_slot_active,
is_active=True,
)
data = {
'force_delete': True,
}
response = self.client.delete(
reverse(
'period-detail',
args=[self.period_active.id]
),
data,
format='json',
)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.reservation.refresh_from_db()
self.user.refresh_from_db()
self.admin.refresh_from_db()
# Make sure the timeslot was deleted (cascade)
self.assertFalse(
TimeSlot.objects.filter(
name="evening_time_slot_active"
).exists()
)
self.assertFalse(self.reservation.is_active)
self.assertEqual(self.reservation.cancelation_reason, 'TD')
self.assertTrue(self.reservation.cancelation_date)
self.assertEqual(len(mail.outbox), 2)
self.assertEqual(self.user.tickets, 3)
self.assertEqual(self.admin.tickets, 1)
self.reservation.is_active = True
self.reservation.cancelation_date = None
self.reservation.cancelation_reason = None
self.reservation.save()
self.reservation.refresh_from_db()
reservation_2.delete()
self.user.tickets = 0
self.user.save()
self.admin.tickets = 0
self.admin.save()
def test_delete_with_reservations_no_force(self):
"""
        Ensure we can't delete a period that has reservations unless the
        force_delete field is provided and set to True.
"""
self.client.force_authenticate(user=self.admin)
data = {
# 'force_delete': True,
}
response = self.client.delete(
reverse(
'period-detail',
args=[self.period_active.id]
),
data,
format='json',
)
content = {
"non_field_errors": [
"Trying to do a Period deletion that affects "
"users without providing `force_delete` field set to True."
]
}
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(json.loads(response.content), content)
def test_delete_with_reservations_invalid_force_delete(self):
"""
        Ensure we can't delete a period if the provided force_delete field is
        not a valid boolean.
"""
self.client.force_authenticate(user=self.admin)
data = {
'force_delete': "invalid",
}
response = self.client.delete(
reverse(
'period-detail',
args=[self.period.id]
),
data,
format='json',
)
content = {
'force_delete': [
'Must be a valid boolean.'
]
}
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(json.loads(response.content), content)
def test_list(self):
"""
        Ensure we can list periods as an unauthenticated user; only active
        periods are returned.
"""
response = self.client.get(
reverse('period-list'),
format='json',
)
data = json.loads(response.content)
data['results'] = [
remove_translation_fields(m) for m in data['results']
]
content = {
'count': 1,
'next': None,
'previous': None,
'results': [{
'id': self.period_active.id,
'end_date': data['results'][0]['end_date'],
'is_active': True,
'name': 'random_period_active',
'price': '3.00',
'total_reservations': 1,
'start_date': data['results'][0]['start_date'],
'url': f'http://testserver/periods/{self.period_active.id}',
'workplace': f'http://testserver/workplaces/'
f'{self.workplace.id}'
}]
}
self.assertEqual(data, content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_list_inactive(self):
"""
Ensure we can list all periods as an admin user.
"""
self.client.force_authenticate(user=self.admin)
response = self.client.get(
reverse('period-list'),
format='json',
)
data = json.loads(response.content)
data['results'] = [
remove_translation_fields(m) for m in data['results']
]
content = {
'count': 2,
'next': None,
'previous': None,
'results': [{
'id': self.period.id,
'end_date': data['results'][0]['end_date'],
'is_active': False,
'name': 'random_period',
'price': '3.00',
'total_reservations': 0,
'start_date': data['results'][0]['start_date'],
'url': f'http://testserver/periods/{self.period.id}',
'workplace':
f'http://testserver/workplaces/{self.workplace.id}'
}, {
'id': self.period_active.id,
'end_date': data['results'][1]['end_date'],
'is_active': True,
'name': 'random_period_active',
'price': '3.00',
'total_reservations': 1,
'start_date': data['results'][1]['start_date'],
'url': f'http://testserver/periods/{self.period_active.id}',
'workplace':
f'http://testserver/workplaces/{self.workplace.id}'
}]
}
self.assertEqual(data, content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_read(self):
"""
Ensure we can read a period as an unauthenticated user if it is active.
"""
response = self.client.get(
reverse(
'period-detail',
args=[self.period_active.id]
),
)
data = json.loads(response.content)
content = {
'id': self.period_active.id,
'end_date': data['end_date'],
'is_active': True,
'name': 'random_period_active',
'price': '3.00',
'total_reservations': 1,
'start_date': data['start_date'],
'url': f'http://testserver/periods/{self.period_active.id}',
'workplace': f'http://testserver/workplaces/{self.workplace.id}'
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_read_inactive(self):
"""
Ensure we can read a period as admin if it is inactive.
"""
self.client.force_authenticate(user=self.admin)
response = self.client.get(
reverse(
'period-detail',
args=[self.period.id]
),
)
data = json.loads(response.content)
content = {
'id': self.period.id,
'end_date': data['end_date'],
'is_active': False,
'name': 'random_period',
'price': '3.00',
'total_reservations': 0,
'start_date': data['start_date'],
'url': f'http://testserver/periods/{self.period.id}',
'workplace': f'http://testserver/workplaces/{self.workplace.id}'
}
self.assertEqual(
remove_translation_fields(json.loads(response.content)),
content
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_read_inactive_non_admin(self):
"""
        Ensure we can't read a period as a non-admin user if it is inactive.
"""
response = self.client.get(
reverse(
'period-detail',
args=[self.period.id]
),
)
content = {'detail': 'Not found.'}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_read_non_existent(self):
"""
Ensure we get not found when asking for a period that doesn't exist.
"""
response = self.client.get(
reverse(
'period-detail',
kwargs={'pk': 999},
),
)
content = {'detail': 'Not found.'}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
```
#### File: workplace/tests/tests_viewset_Workplace.py
```python
import json
from rest_framework import status
from rest_framework.test import APIClient, APITestCase
from django.urls import reverse
from django.contrib.auth import get_user_model
from blitz_api.factories import UserFactory, AdminFactory
from blitz_api.services import remove_translation_fields
from ..models import Workplace
User = get_user_model()
class WorkplaceTests(APITestCase):
@classmethod
def setUpClass(cls):
super(WorkplaceTests, cls).setUpClass()
cls.client = APIClient()
cls.user = UserFactory()
cls.admin = AdminFactory()
def setUp(self):
self.workplace = Workplace.objects.create(
name="Blitz",
seats=40,
details="short_description",
address_line1="random_address_1",
postal_code="RAN_DOM",
city='random_city',
state_province="Random_State",
country="Random_Country",
timezone="America/Montreal",
)
def test_create(self):
"""
Ensure we can create a workplace if user has permission.
"""
self.client.force_authenticate(user=self.admin)
data = {
'name': "random_workplace",
'seats': 40,
'details': "short_description",
'address_line1': 'random_address_1',
'city': 'random_city',
'country': 'Random_Country',
'postal_code': 'RAN_DOM',
'state_province': 'Random_State',
'timezone': "America/Montreal",
'volunteers': [f"http://testserver/users/{self.user.id}"],
}
response = self.client.post(
reverse('workplace-list'),
data,
format='json',
)
response_content = json.loads(response.content)
self.assertEqual(response.status_code, status.HTTP_201_CREATED,
response.content)
content = {
'details': 'short_description',
'address_line1': 'random_address_1',
'address_line2': None,
'city': 'random_city',
'country': 'Random_Country',
'postal_code': 'RAN_DOM',
'state_province': 'Random_State',
'latitude': None,
'longitude': None,
'name': 'random_workplace',
'pictures': [],
'seats': 40,
'timezone': "America/Montreal",
'place_name': '',
'volunteers': [
f'http://testserver/users/{self.user.id}'
],
}
del response_content['id']
del response_content['url']
self.assertEqual(
remove_translation_fields(response_content),
content
)
def test_create_without_permission(self):
"""
Ensure we can't create a workplace if user has no permission.
"""
self.client.force_authenticate(user=self.user)
data = {
'name': "random_workplace",
'seats': 40,
'details': "short_description",
'address_line1': 'random_address_1',
'city': 'random_city',
'country': 'Random_Country',
'postal_code': 'RAN_DOM',
'state_province': 'Random_State',
'timezone': "America/Montreal"
}
response = self.client.post(
reverse('workplace-list'),
data,
format='json',
)
content = {
'detail': 'You do not have permission to perform this action.'
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_create_duplicate_name(self):
"""
        Ensure we can't create a workplace with the same name.
"""
self.client.force_authenticate(user=self.admin)
data = {
'name': "Blitz",
'seats': 40,
'details': "short_description",
'address_line1': 'random_address_1',
'city': 'random_city',
'country': 'Random_Country',
'postal_code': 'RAN_DOM',
'state_province': 'Random_State',
'timezone': "America/Montreal"
}
response = self.client.post(
reverse('workplace-list'),
data,
format='json',
)
content = {'name': ['This field must be unique.']}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_missing_field(self):
"""
        Ensure we can't create a workplace when required fields are missing.
"""
self.client.force_authenticate(user=self.admin)
data = {}
response = self.client.post(
reverse('workplace-list'),
data,
format='json',
)
content = {
'details': ['This field is required.'],
'address_line1': ['This field is required.'],
'city': ['This field is required.'],
'country': ['This field is required.'],
'name': ['This field is required.'],
'postal_code': ['This field is required.'],
'seats': ['This field is required.'],
'state_province': ['This field is required.'],
'timezone': ['This field is required.']
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_invalid_field(self):
"""
Ensure we can't create a workplace with invalid fields.
"""
self.client.force_authenticate(user=self.admin)
data = {
'name': ("invalid",),
'seats': "invalid",
'details': ("invalid",),
'postal_code': (1,),
'city': (1,),
'address_line1': (1,),
'country': (1,),
'state_province': (1,),
'timezone': ("invalid",),
'place_name': (1,),
'volunteers': (1,),
}
response = self.client.post(
reverse('workplace-list'),
data,
format='json',
)
content = {
'details': ['Not a valid string.'],
'name': ['Not a valid string.'],
'city': ['Not a valid string.'],
'address_line1': ['Not a valid string.'],
'postal_code': ['Not a valid string.'],
'state_province': ['Not a valid string.'],
'country': ['Not a valid string.'],
'seats': ['A valid integer is required.'],
'timezone': ['Unknown timezone'],
'place_name': ['Not a valid string.'],
'volunteers': [
'Incorrect type. Expected URL string, received int.'
],
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_update(self):
"""
Ensure we can update a workplace.
"""
self.client.force_authenticate(user=self.admin)
data = {
'name': "new_workplace",
'seats': 200,
'details': "new_short_description",
'address_line1': 'new_address',
'city': 'new_city',
'country': 'Random_Country',
'postal_code': 'NEW_CIT',
'state_province': 'Random_State',
'timezone': "America/Montreal",
}
response = self.client.put(
reverse(
'workplace-detail',
kwargs={'pk': self.workplace.id},
),
data,
format='json',
)
content = {
'details': 'new_short_description',
'id': self.workplace.id,
'longitude': None,
'latitude': None,
'address_line1': 'new_address',
'address_line2': None,
'city': 'new_city',
'country': 'Random_Country',
'postal_code': 'NEW_CIT',
'state_province': 'Random_State',
'name': 'new_workplace',
'pictures': [],
'seats': 200,
'timezone': 'America/Montreal',
'place_name': '',
'volunteers': [],
'url': f'http://testserver/workplaces/{self.workplace.id}'
}
self.assertEqual(
remove_translation_fields(json.loads(response.content)),
content
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_delete(self):
"""
Ensure we can delete a workplace.
"""
self.client.force_authenticate(user=self.admin)
response = self.client.delete(
reverse(
'workplace-detail',
kwargs={'pk': self.workplace.id},
),
)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_list(self):
"""
Ensure we can list workplaces as an unauthenticated user.
"""
response = self.client.get(
reverse('workplace-list'),
format='json',
)
content = {
'count': 1,
'next': None,
'previous': None,
'results': [{
'details': 'short_description',
'id': self.workplace.id,
'latitude': None,
'longitude': None,
'address_line1': 'random_address_1',
'address_line2': None,
'city': 'random_city',
'country': 'Random_Country',
'postal_code': 'RAN_DOM',
'state_province': 'Random_State',
'name': 'Blitz',
'pictures': [],
'seats': 40,
'timezone': 'America/Montreal',
'place_name': '',
'volunteers': [],
'url': f'http://testserver/workplaces/{self.workplace.id}'
}]
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_read(self):
"""
Ensure we can read a workplace as an unauthenticated user.
"""
response = self.client.get(
reverse(
'workplace-detail',
kwargs={'pk': self.workplace.id},
),
)
content = {
'details': 'short_description',
'id': self.workplace.id,
'address_line1': 'random_address_1',
'address_line2': None,
'city': 'random_city',
'country': 'Random_Country',
'longitude': None,
'latitude': None,
'postal_code': 'RAN_DOM',
'state_province': 'Random_State',
'name': 'Blitz',
'pictures': [],
'seats': 40,
'place_name': '',
'timezone': 'America/Montreal',
'volunteers': [],
'url': f'http://testserver/workplaces/{self.workplace.id}'
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_read_non_existent_workplace(self):
"""
Ensure we get not found when asking for a workplace that doesn't exist.
"""
response = self.client.get(
reverse(
'workplace-detail',
kwargs={'pk': 999},
),
)
content = {'detail': 'Not found.'}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
```
|
{
"source": "jeromechiu/Travel_Salesman_Proble",
"score": 3
}
|
#### File: jeromechiu/Travel_Salesman_Proble/address_to_wgs84.py
```python
import googlemaps
import pandas as pd
from key import gmap_key
gmaps = googlemaps.Client(key=gmap_key)
"""
Sample of address
destinations = [[0, '新北市中和區中正路209號'],
[1, '新北市中和區建一路92號'],
[3, '新北市中和區景平路634-2號B1'],
[2, '新北市中和區連城路258號18樓']
]
Sample of WGS84
dest_coord = [[0, (24.993484, 121.497134)],
[1, (25.0007671, 121.4879088)],
[3, (24.9986295, 121.5007544)],
[2, (24.99663, 121.4869139)]]
"""
def to_coord(address):
return gmaps.geocode(address)
def transfer_address_geocord(destinations):
dest_coord = list()
for i, addr in destinations:
data = to_coord(addr)
lat, long = data[0]['geometry']['location']['lat'], data[0]['geometry']['location']['lng']
dest_coord.append([i, (lat, long)])
dest_coord = pd.DataFrame(dest_coord, columns=['id', 'coord'])
dest_coord.set_index('id', inplace=True)
dest_coord.sort_index(inplace=True)
return dest_coord
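# Illustrative usage sketch: the addresses below reuse the samples from the
# docstring above; running this requires a valid Google Maps API key in key.py
# (gmap_key) and network access.
if __name__ == '__main__':
    sample_destinations = [[0, '新北市中和區中正路209號'],
                           [1, '新北市中和區建一路92號']]
    print(transfer_address_geocord(sample_destinations))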
```
#### File: jeromechiu/Travel_Salesman_Proble/main.py
```python
import travel_point_grouping
# import routing
from address_to_wgs84 import transfer_address_geocord
from routing import calculate_tsp
from travel_point_grouping import wgs84_to_cartesian, grouping
destinations = [[0, '新北市中和區中正路209號'],
[1, '新北市中和區建一路92號'],
[3, '新北市中和區景平路634-2號B1'],
[2, '新北市中和區連城路258號18樓']
]
dest_coord = [[0, (24.993484, 121.497134)],
[1, (25.0007671, 121.4879088)],
[3, (24.9986295, 121.5007544)],
[2, (24.99663, 121.4869139)],
[4, (24.99675656624081, 121.50636226818159)],
[5, (25.002060969852035, 121.51072200728377)],
[6, (24.99648971473095, 121.50066515392008)],
[7, (24.99725077478079, 121.50031353934627)],
[8, (24.99674027185629, 121.49756159310002)],
[9, (24.996839941641497, 121.49789418699332)],
[10, (24.997515749272218, 121.49955447425104)],
[11, (24.995498039188387, 121.50097604502005)],
[12, (24.99587241199133, 121.50172974569763)],
[13, (24.99577760340233, 121.4988369834885)],
[14, (24.99635496227342, 121.50022636767291)],
[15, (24.996587107412616, 121.50234704426815)]
]
def main():
# dest_coord = transfer_address_geocord(destinations)
groupped = grouping(wgs84_to_cartesian(dest_coord), dest_coord)
print(groupped)
delivery_plan = calculate_tsp(groupped)
print(delivery_plan)
if __name__ == '__main__':
main()
```
|
{
"source": "jeromecn/caravel_viz_full",
"score": 2
}
|
#### File: caravel_viz_full/caravel/models.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import OrderedDict
import functools
import json
import logging
import pickle
import re
import textwrap
from collections import namedtuple
from copy import deepcopy, copy
from datetime import timedelta, datetime, date
import humanize
import pandas as pd
import requests
import sqlalchemy as sqla
from sqlalchemy.engine.url import make_url
from sqlalchemy.orm import subqueryload
import sqlparse
from dateutil.parser import parse
from flask import escape, g, Markup, request
from flask_appbuilder import Model
from flask_appbuilder.models.mixins import AuditMixin
from flask_appbuilder.models.decorators import renders
from flask_babel import lazy_gettext as _
from pydruid.client import PyDruid
from pydruid.utils.filters import Dimension, Filter
from pydruid.utils.postaggregator import Postaggregator
from pydruid.utils.having import Aggregation
from six import string_types
from sqlalchemy import (
Column, Integer, String, ForeignKey, Text, Boolean,
DateTime, Date, Table, Numeric,
create_engine, MetaData, desc, asc, select, and_, func
)
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import backref, relationship
from sqlalchemy.orm.session import make_transient
from sqlalchemy.sql import table, literal_column, text, column
from sqlalchemy.sql.expression import ColumnClause, TextAsFrom
from sqlalchemy_utils import EncryptedType
from werkzeug.datastructures import ImmutableMultiDict
import caravel
from caravel import app, db, db_engine_specs, get_session, utils, sm
from caravel.source_registry import SourceRegistry
from caravel.viz import viz_types
from caravel.jinja_context import get_template_processor
from caravel.utils import (
flasher, MetricPermException, DimSelector, wrap_clause_in_parens
)
config = app.config
QueryResult = namedtuple('QueryResult', ['df', 'query', 'duration'])
FillterPattern = re.compile(r'''((?:[^,"']|"[^"]*"|'[^']*')+)''')
class JavascriptPostAggregator(Postaggregator):
def __init__(self, name, field_names, function):
self.post_aggregator = {
'type': 'javascript',
'fieldNames': field_names,
'name': name,
'function': function,
}
self.name = name
class ImportMixin(object):
def override(self, obj):
"""Overrides the plain fields of the dashboard."""
for field in obj.__class__.export_fields:
setattr(self, field, getattr(obj, field))
def copy(self):
"""Creates a copy of the dashboard without relationships."""
new_obj = self.__class__()
new_obj.override(self)
return new_obj
def alter_params(self, **kwargs):
d = self.params_dict
d.update(kwargs)
self.params = json.dumps(d)
@property
def params_dict(self):
if self.params:
return json.loads(self.params)
else:
return {}
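    # Illustrative usage sketch of this mixin (the Slice instance below is an
    # assumption for the example, not taken from this repo's fixtures):
    # slc = Slice(params='{"viz_type": "table"}')
    # slc.alter_params(remote_id=42)   # merges the kwarg into the JSON params
    # slc.params_dict                  # -> {'viz_type': 'table', 'remote_id': 42}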
class AuditMixinNullable(AuditMixin):
"""Altering the AuditMixin to use nullable fields
Allows creating objects programmatically outside of CRUD
"""
created_on = Column(DateTime, default=datetime.now, nullable=True)
changed_on = Column(
DateTime, default=datetime.now,
onupdate=datetime.now, nullable=True)
@declared_attr
def created_by_fk(cls): # noqa
return Column(Integer, ForeignKey('ab_user.id'),
default=cls.get_user_id, nullable=True)
@declared_attr
def changed_by_fk(cls): # noqa
return Column(
Integer, ForeignKey('ab_user.id'),
default=cls.get_user_id, onupdate=cls.get_user_id, nullable=True)
@renders('created_on')
def creator(self): # noqa
return '{}'.format(self.created_by or '')
@property
def changed_by_(self):
return '{}'.format(self.changed_by or '')
@renders('changed_on')
def changed_on_(self):
return Markup(
'<span class="no-wrap">{}</span>'.format(self.changed_on))
@renders('changed_on')
def modified(self):
s = humanize.naturaltime(datetime.now() - self.changed_on)
return Markup('<span class="no-wrap">{}</span>'.format(s))
@property
def icons(self):
return """
<a
href="{self.datasource_edit_url}"
data-toggle="tooltip"
title="{self.datasource}">
<i class="fa fa-database"></i>
</a>
""".format(**locals())
class Url(Model, AuditMixinNullable):
"""Used for the short url feature"""
__tablename__ = 'url'
id = Column(Integer, primary_key=True)
url = Column(Text)
class CssTemplate(Model, AuditMixinNullable):
"""CSS templates for dashboards"""
__tablename__ = 'css_templates'
id = Column(Integer, primary_key=True)
template_name = Column(String(250))
css = Column(Text, default='')
slice_user = Table('slice_user', Model.metadata,
Column('id', Integer, primary_key=True),
Column('user_id', Integer, ForeignKey('ab_user.id')),
Column('slice_id', Integer, ForeignKey('slices.id'))
)
class Slice(Model, AuditMixinNullable, ImportMixin):
"""A slice is essentially a report or a view on data"""
__tablename__ = 'slices'
id = Column(Integer, primary_key=True)
slice_name = Column(String(250))
datasource_id = Column(Integer)
datasource_type = Column(String(200))
datasource_name = Column(String(2000))
viz_type = Column(String(250))
params = Column(Text)
description = Column(Text)
cache_timeout = Column(Integer)
perm = Column(String(2000))
owners = relationship("User", secondary=slice_user)
export_fields = ('slice_name', 'datasource_type', 'datasource_name',
'viz_type', 'params', 'cache_timeout')
def __repr__(self):
return self.slice_name
@property
def cls_model(self):
return SourceRegistry.sources[self.datasource_type]
@property
def datasource(self):
return self.get_datasource
@datasource.getter
@utils.memoized
def get_datasource(self):
ds = db.session.query(
self.cls_model).filter_by(
id=self.datasource_id).first()
return ds
@renders('datasource_name')
def datasource_link(self):
datasource = self.datasource
if datasource:
return self.datasource.link
@property
def datasource_edit_url(self):
        return self.datasource.url
@property
@utils.memoized
def viz(self):
d = json.loads(self.params)
viz_class = viz_types[self.viz_type]
return viz_class(self.datasource, form_data=d)
@property
def description_markeddown(self):
return utils.markdown(self.description)
@property
def data(self):
"""Data used to render slice in templates"""
d = {}
self.token = ''
try:
d = self.viz.data
self.token = d.get('token')
except Exception as e:
d['error'] = str(e)
d['slice_id'] = self.id
d['slice_name'] = self.slice_name
d['description'] = self.description
d['slice_url'] = self.slice_url
d['edit_url'] = self.edit_url
d['description_markeddown'] = self.description_markeddown
return d
@property
def json_data(self):
return json.dumps(self.data)
@property
def slice_url(self):
"""Defines the url to access the slice"""
try:
slice_params = json.loads(self.params)
except Exception as e:
logging.exception(e)
slice_params = {}
slice_params['slice_id'] = self.id
slice_params['json'] = "false"
slice_params['slice_name'] = self.slice_name
from werkzeug.urls import Href
href = Href(
"/caravel/explore/{obj.datasource_type}/"
"{obj.datasource_id}/".format(obj=self))
return href(slice_params)
@property
def slice_id_url(self):
return (
"/caravel/{slc.datasource_type}/{slc.datasource_id}/{slc.id}/"
).format(slc=self)
@property
def edit_url(self):
return "/slicemodelview/edit/{}".format(self.id)
@property
def slice_link(self):
url = self.slice_url
name = escape(self.slice_name)
return Markup('<a href="{url}">{name}</a>'.format(**locals()))
def get_viz(self, url_params_multidict=None):
"""Creates :py:class:viz.BaseViz object from the url_params_multidict.
:param werkzeug.datastructures.MultiDict url_params_multidict:
Contains the visualization params, they override the self.params
stored in the database
:return: object of the 'viz_type' type that is taken from the
url_params_multidict or self.params.
:rtype: :py:class:viz.BaseViz
"""
slice_params = json.loads(self.params) # {}
slice_params['slice_id'] = self.id
slice_params['json'] = "false"
slice_params['slice_name'] = self.slice_name
slice_params['viz_type'] = self.viz_type if self.viz_type else "table"
if url_params_multidict:
slice_params.update(url_params_multidict)
to_del = [k for k in slice_params if k not in url_params_multidict]
for k in to_del:
del slice_params[k]
immutable_slice_params = ImmutableMultiDict(slice_params)
return viz_types[immutable_slice_params.get('viz_type')](
self.datasource,
form_data=immutable_slice_params,
slice_=self
)
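    # Illustrative usage sketch: overriding the stored params with request
    # arguments (the MultiDict contents are made up for the example):
    # viz = slc.get_viz(ImmutableMultiDict({'viz_type': 'table', 'row_limit': '10'}))
    # viz.data  # -> dict consumed by the slice template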
@classmethod
def import_obj(cls, slc_to_import, import_time=None):
"""Inserts or overrides slc in the database.
remote_id and import_time fields in params_dict are set to track the
slice origin and ensure correct overrides for multiple imports.
Slice.perm is used to find the datasources and connect them.
"""
session = db.session
make_transient(slc_to_import)
slc_to_import.dashboards = []
slc_to_import.alter_params(
remote_id=slc_to_import.id, import_time=import_time)
# find if the slice was already imported
slc_to_override = None
for slc in session.query(Slice).all():
if ('remote_id' in slc.params_dict and
slc.params_dict['remote_id'] == slc_to_import.id):
slc_to_override = slc
slc_to_import = slc_to_import.copy()
params = slc_to_import.params_dict
slc_to_import.datasource_id = SourceRegistry.get_datasource_by_name(
session, slc_to_import.datasource_type, params['datasource_name'],
params['schema'], params['database_name']).id
if slc_to_override:
slc_to_override.override(slc_to_import)
session.flush()
return slc_to_override.id
else:
session.add(slc_to_import)
logging.info('Final slice: {}'.format(slc_to_import.to_json()))
session.flush()
return slc_to_import.id
def set_perm(mapper, connection, target): # noqa
src_class = target.cls_model
id_ = target.datasource_id
ds = db.session.query(src_class).filter_by(id=int(id_)).first()
target.perm = ds.perm
sqla.event.listen(Slice, 'before_insert', set_perm)
sqla.event.listen(Slice, 'before_update', set_perm)
dashboard_slices = Table(
'dashboard_slices', Model.metadata,
Column('id', Integer, primary_key=True),
Column('dashboard_id', Integer, ForeignKey('dashboards.id')),
Column('slice_id', Integer, ForeignKey('slices.id')),
)
dashboard_user = Table(
'dashboard_user', Model.metadata,
Column('id', Integer, primary_key=True),
Column('user_id', Integer, ForeignKey('ab_user.id')),
Column('dashboard_id', Integer, ForeignKey('dashboards.id'))
)
class Dashboard(Model, AuditMixinNullable, ImportMixin):
"""The dashboard object!"""
__tablename__ = 'dashboards'
id = Column(Integer, primary_key=True)
dashboard_title = Column(String(500))
position_json = Column(Text)
description = Column(Text)
css = Column(Text)
json_metadata = Column(Text)
slug = Column(String(255), unique=True)
slices = relationship(
'Slice', secondary=dashboard_slices, backref='dashboards')
owners = relationship("User", secondary=dashboard_user)
export_fields = ('dashboard_title', 'position_json', 'json_metadata',
'description', 'css', 'slug')
def __repr__(self):
return self.dashboard_title
@property
def table_names(self):
return ", ".join({"{}".format(s.datasource) for s in self.slices})
@property
def url(self):
return "/caravel/dashboard/{}/".format(self.slug or self.id)
@property
def datasources(self):
return {slc.datasource for slc in self.slices}
@property
def sqla_metadata(self):
metadata = MetaData(bind=self.get_sqla_engine())
return metadata.reflect()
def dashboard_link(self):
title = escape(self.dashboard_title)
return Markup(
'<a href="{self.url}">{title}</a>'.format(**locals()))
@property
def json_data(self):
d = {
'id': self.id,
'metadata': self.params_dict,
'dashboard_title': self.dashboard_title,
'slug': self.slug,
'slices': [slc.data for slc in self.slices],
'position_json': json.loads(self.position_json) if self.position_json else [],
}
return json.dumps(d)
@property
def params(self):
return self.json_metadata
@params.setter
def params(self, value):
self.json_metadata = value
@property
def position_array(self):
if self.position_json:
return json.loads(self.position_json)
return []
@classmethod
def import_obj(cls, dashboard_to_import, import_time=None):
"""Imports the dashboard from the object to the database.
        Once the dashboard is imported, the json_metadata field is extended to
        store remote_id and import_time. These help decide whether the dashboard
        has to be overridden or just copied over. Slices that belong to this
        dashboard will be wired to existing tables. This function can be used
        to import/export dashboards between multiple caravel instances.
        Audit metadata isn't copied over.
"""
def alter_positions(dashboard, old_to_new_slc_id_dict):
""" Updates slice_ids in the position json.
Sample position json:
[{
"col": 5,
"row": 10,
"size_x": 4,
"size_y": 2,
"slice_id": "3610"
}]
"""
position_array = dashboard.position_array
for position in position_array:
if 'slice_id' not in position:
continue
old_slice_id = int(position['slice_id'])
if old_slice_id in old_to_new_slc_id_dict:
position['slice_id'] = '{}'.format(
old_to_new_slc_id_dict[old_slice_id])
dashboard.position_json = json.dumps(position_array)
logging.info('Started import of the dashboard: {}'
.format(dashboard_to_import.to_json()))
session = db.session
logging.info('Dashboard has {} slices'
.format(len(dashboard_to_import.slices)))
# copy slices object as Slice.import_slice will mutate the slice
# and will remove the existing dashboard - slice association
slices = copy(dashboard_to_import.slices)
old_to_new_slc_id_dict = {}
new_filter_immune_slices = []
new_expanded_slices = {}
i_params_dict = dashboard_to_import.params_dict
for slc in slices:
logging.info('Importing slice {} from the dashboard: {}'.format(
slc.to_json(), dashboard_to_import.dashboard_title))
new_slc_id = Slice.import_obj(slc, import_time=import_time)
old_to_new_slc_id_dict[slc.id] = new_slc_id
# update json metadata that deals with slice ids
new_slc_id_str = '{}'.format(new_slc_id)
old_slc_id_str = '{}'.format(slc.id)
if ('filter_immune_slices' in i_params_dict and
old_slc_id_str in i_params_dict['filter_immune_slices']):
new_filter_immune_slices.append(new_slc_id_str)
if ('expanded_slices' in i_params_dict and
old_slc_id_str in i_params_dict['expanded_slices']):
new_expanded_slices[new_slc_id_str] = (
i_params_dict['expanded_slices'][old_slc_id_str])
# override the dashboard
existing_dashboard = None
for dash in session.query(Dashboard).all():
if ('remote_id' in dash.params_dict and
dash.params_dict['remote_id'] ==
dashboard_to_import.id):
existing_dashboard = dash
dashboard_to_import.id = None
alter_positions(dashboard_to_import, old_to_new_slc_id_dict)
dashboard_to_import.alter_params(import_time=import_time)
if new_expanded_slices:
dashboard_to_import.alter_params(
expanded_slices=new_expanded_slices)
if new_filter_immune_slices:
dashboard_to_import.alter_params(
filter_immune_slices=new_filter_immune_slices)
new_slices = session.query(Slice).filter(
Slice.id.in_(old_to_new_slc_id_dict.values())).all()
if existing_dashboard:
existing_dashboard.override(dashboard_to_import)
existing_dashboard.slices = new_slices
session.flush()
return existing_dashboard.id
else:
# session.add(dashboard_to_import) causes sqlachemy failures
# related to the attached users / slices. Creating new object
# allows to avoid conflicts in the sql alchemy state.
copied_dash = dashboard_to_import.copy()
copied_dash.slices = new_slices
session.add(copied_dash)
session.flush()
return copied_dash.id
@classmethod
def export_dashboards(cls, dashboard_ids):
copied_dashboards = []
datasource_ids = set()
for dashboard_id in dashboard_ids:
# make sure that dashboard_id is an integer
dashboard_id = int(dashboard_id)
copied_dashboard = (
db.session.query(Dashboard)
.options(subqueryload(Dashboard.slices))
.filter_by(id=dashboard_id).first()
)
make_transient(copied_dashboard)
for slc in copied_dashboard.slices:
datasource_ids.add((slc.datasource_id, slc.datasource_type))
# add extra params for the import
slc.alter_params(
remote_id=slc.id,
datasource_name=slc.datasource.name,
schema=slc.datasource.name,
database_name=slc.datasource.database.database_name,
)
copied_dashboard.alter_params(remote_id=dashboard_id)
copied_dashboards.append(copied_dashboard)
eager_datasources = []
for dashboard_id, dashboard_type in datasource_ids:
eager_datasource = SourceRegistry.get_eager_datasource(
db.session, dashboard_type, dashboard_id)
eager_datasource.alter_params(
remote_id=eager_datasource.id,
database_name=eager_datasource.database.database_name,
)
make_transient(eager_datasource)
eager_datasources.append(eager_datasource)
return pickle.dumps({
'dashboards': copied_dashboards,
'datasources': eager_datasources,
})
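    # Illustrative sketch of consuming the export above (the import side would
    # unpickle the payload; the dashboard id is made up):
    # payload = pickle.loads(Dashboard.export_dashboards([1]))
    # payload['dashboards'], payload['datasources']  # transient ORM objects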
class Queryable(object):
"""A common interface to objects that are queryable (tables and datasources)"""
@property
def column_names(self):
return sorted([c.column_name for c in self.columns])
@property
def main_dttm_col(self):
return "timestamp"
@property
def groupby_column_names(self):
return sorted([c.column_name for c in self.columns if c.groupby])
@property
def filterable_column_names(self):
return sorted([c.column_name for c in self.columns if c.filterable])
@property
def dttm_cols(self):
return []
@property
def url(self):
return '/{}/edit/{}'.format(self.baselink, self.id)
@property
def explore_url(self):
if self.default_endpoint:
return self.default_endpoint
else:
return "/caravel/explore/{obj.type}/{obj.id}/".format(obj=self)
class Database(Model, AuditMixinNullable):
"""An ORM object that stores Database related information"""
__tablename__ = 'dbs'
id = Column(Integer, primary_key=True)
database_name = Column(String(250), unique=True)
sqlalchemy_uri = Column(String(1024))
password = Column(EncryptedType(String(1024), config.get('SECRET_KEY')))
cache_timeout = Column(Integer)
select_as_create_table_as = Column(Boolean, default=False)
expose_in_sqllab = Column(Boolean, default=False)
allow_run_sync = Column(Boolean, default=True)
allow_run_async = Column(Boolean, default=False)
allow_ctas = Column(Boolean, default=False)
allow_dml = Column(Boolean, default=False)
force_ctas_schema = Column(String(250))
extra = Column(Text, default=textwrap.dedent("""\
{
"metadata_params": {},
"engine_params": {}
}
"""))
def __repr__(self):
return self.database_name
@property
def name(self):
return self.database_name
@property
def backend(self):
url = make_url(self.sqlalchemy_uri_decrypted)
return url.get_backend_name()
def set_sqlalchemy_uri(self, uri):
password_mask = "X" * 10
conn = sqla.engine.url.make_url(uri)
if conn.password != password_mask:
# do not over-write the password with the password mask
            self.password = conn.password
conn.password = password_mask if conn.password else None
self.sqlalchemy_uri = str(conn) # hides the password
def get_sqla_engine(self, schema=None):
extra = self.get_extra()
url = make_url(self.sqlalchemy_uri_decrypted)
params = extra.get('engine_params', {})
if self.backend == 'presto' and schema:
if '/' in url.database:
url.database = url.database.split('/')[0] + '/' + schema
else:
url.database += '/' + schema
elif schema:
url.database = schema
return create_engine(url, **params)
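    # Illustrative behavior of the schema handling above (URIs are made up):
    #   presto://host:8080/hive          + schema 'prod' -> database 'hive/prod'
    #   presto://host:8080/hive/default  + schema 'prod' -> database 'hive/prod'
    #   any other backend                + schema 'prod' -> database 'prod'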
def get_reserved_words(self):
return self.get_sqla_engine().dialect.preparer.reserved_words
def get_quoter(self):
return self.get_sqla_engine().dialect.identifier_preparer.quote
def get_df(self, sql, schema):
sql = sql.strip().strip(';')
eng = self.get_sqla_engine(schema=schema)
cur = eng.execute(sql, schema=schema)
cols = [col[0] for col in cur.cursor.description]
df = pd.DataFrame(cur.fetchall(), columns=cols)
return df
def compile_sqla_query(self, qry, schema=None):
eng = self.get_sqla_engine(schema=schema)
compiled = qry.compile(eng, compile_kwargs={"literal_binds": True})
return '{}'.format(compiled)
def select_star(
self, table_name, schema=None, limit=100, show_cols=False,
indent=True):
"""Generates a ``select *`` statement in the proper dialect"""
for i in range(10):
print(schema)
quote = self.get_quoter()
fields = '*'
table = self.get_table(table_name, schema=schema)
if show_cols:
fields = [quote(c.name) for c in table.columns]
if schema:
table_name = schema + '.' + table_name
qry = select(fields).select_from(text(table_name))
if limit:
qry = qry.limit(limit)
sql = self.compile_sqla_query(qry)
if indent:
sql = sqlparse.format(sql, reindent=True)
return sql
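    # Illustrative usage sketch (the table name is made up):
    # db_obj.select_star('energy_usage', schema='public', limit=10, show_cols=True)
    # -> "SELECT col_a, col_b ... FROM public.energy_usage LIMIT 10" rendered in
    #    this database's dialect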
def wrap_sql_limit(self, sql, limit=1000):
qry = (
select('*')
.select_from(TextAsFrom(text(sql), ['*'])
.alias('inner_qry')).limit(limit)
)
return self.compile_sqla_query(qry)
def safe_sqlalchemy_uri(self):
return self.sqlalchemy_uri
@property
def inspector(self):
engine = self.get_sqla_engine()
return sqla.inspect(engine)
def all_table_names(self, schema=None):
return sorted(self.inspector.get_table_names(schema))
def all_view_names(self, schema=None):
views = []
try:
views = self.inspector.get_view_names(schema)
except Exception as e:
pass
return views
def all_schema_names(self):
return sorted(self.inspector.get_schema_names())
@property
def db_engine_spec(self):
engine_name = self.get_sqla_engine().name or 'base'
return db_engine_specs.engines.get(
engine_name, db_engine_specs.BaseEngineSpec)
def grains(self):
"""Defines time granularity database-specific expressions.
The idea here is to make it easy for users to change the time grain
        from a datetime (maybe the source grain is arbitrary timestamps, daily
        or 5-minute increments) to another, "truncated" datetime. Since
each database has slightly different but similar datetime functions,
this allows a mapping between database engines and actual functions.
"""
return self.db_engine_spec.time_grains
def grains_dict(self):
return {grain.name: grain for grain in self.grains()}
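    # Illustrative usage sketch: resolving a grain expression for a column
    # ('day' and 'ds' are assumptions; available names depend on the engine spec):
    # grain = db_obj.grains_dict().get('day')
    # grain.function.format(col='ds')  # -> dialect-specific truncation expression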
def get_extra(self):
extra = {}
if self.extra:
try:
extra = json.loads(self.extra)
except Exception as e:
logging.error(e)
return extra
def get_table(self, table_name, schema=None):
extra = self.get_extra()
meta = MetaData(**extra.get('metadata_params', {}))
return Table(
table_name, meta,
schema=schema or None,
autoload=True,
autoload_with=self.get_sqla_engine())
def get_columns(self, table_name, schema=None):
return self.inspector.get_columns(table_name, schema)
def get_indexes(self, table_name, schema=None):
return self.inspector.get_indexes(table_name, schema)
def get_pk_constraint(self, table_name, schema=None):
return self.inspector.get_pk_constraint(table_name, schema)
def get_foreign_keys(self, table_name, schema=None):
return self.inspector.get_foreign_keys(table_name, schema)
@property
def sqlalchemy_uri_decrypted(self):
conn = sqla.engine.url.make_url(self.sqlalchemy_uri)
conn.password = self.password
return str(conn)
@property
def sql_url(self):
return '/caravel/sql/{}/'.format(self.id)
@property
def perm(self):
return (
"[{obj.database_name}].(id:{obj.id})").format(obj=self)
class SqlaTable(Model, Queryable, AuditMixinNullable, ImportMixin):
"""An ORM object for SqlAlchemy table references"""
type = "table"
__tablename__ = 'tables'
id = Column(Integer, primary_key=True)
table_name = Column(String(250))
main_dttm_col = Column(String(250))
description = Column(Text)
default_endpoint = Column(Text)
database_id = Column(Integer, ForeignKey('dbs.id'), nullable=False)
is_featured = Column(Boolean, default=False)
user_id = Column(Integer, ForeignKey('ab_user.id'))
owner = relationship('User', backref='tables', foreign_keys=[user_id])
database = relationship(
'Database',
backref=backref('tables', cascade='all, delete-orphan'),
foreign_keys=[database_id])
offset = Column(Integer, default=0)
cache_timeout = Column(Integer)
schema = Column(String(255))
sql = Column(Text)
params = Column(Text)
baselink = "tablemodelview"
export_fields = (
'table_name', 'main_dttm_col', 'description', 'default_endpoint',
'database_id', 'is_featured', 'offset', 'cache_timeout', 'schema',
'sql', 'params')
__table_args__ = (
sqla.UniqueConstraint(
'database_id', 'schema', 'table_name',
name='_customer_location_uc'),)
def __repr__(self):
return self.table_name
@property
def description_markeddown(self):
return utils.markdown(self.description)
@property
def link(self):
table_name = escape(self.table_name)
return Markup(
'<a href="{self.explore_url}">{table_name}</a>'.format(**locals()))
@property
def perm(self):
return (
"[{obj.database}].[{obj.table_name}]"
"(id:{obj.id})").format(obj=self)
@property
def name(self):
return self.table_name
@property
def full_name(self):
return utils.get_datasource_full_name(
self.database, self.table_name, schema=self.schema)
@property
def dttm_cols(self):
l = [c.column_name for c in self.columns if c.is_dttm]
if self.main_dttm_col not in l:
l.append(self.main_dttm_col)
return l
@property
def num_cols(self):
return [c.column_name for c in self.columns if c.isnum]
@property
def any_dttm_col(self):
cols = self.dttm_cols
if cols:
return cols[0]
@property
def html(self):
t = ((c.column_name, c.type) for c in self.columns)
df = pd.DataFrame(t)
df.columns = ['field', 'type']
return df.to_html(
index=False,
classes=(
"dataframe table table-striped table-bordered "
"table-condensed"))
@property
def metrics_combo(self):
return sorted(
[
(m.metric_name, m.verbose_name or m.metric_name)
for m in self.metrics],
key=lambda x: x[1])
@property
def sql_url(self):
return self.database.sql_url + "?table_name=" + str(self.table_name)
@property
def time_column_grains(self):
return {
"time_columns": self.dttm_cols,
"time_grains": [grain.name for grain in self.database.grains()]
}
def get_col(self, col_name):
columns = self.columns
for col in columns:
if col_name == col.column_name:
return col
def query( # sqla
self, groupby, metrics,
granularity,
from_dttm, to_dttm,
filter=None, # noqa
is_timeseries=True,
timeseries_limit=15,
timeseries_limit_metric=None,
row_limit=None,
inner_from_dttm=None,
inner_to_dttm=None,
orderby=None,
extras=None,
columns=None):
"""Querying any sqla table from this common interface"""
template_processor = get_template_processor(
table=self, database=self.database)
# For backward compatibility
if granularity not in self.dttm_cols:
granularity = self.main_dttm_col
cols = {col.column_name: col for col in self.columns}
metrics_dict = {m.metric_name: m for m in self.metrics}
qry_start_dttm = datetime.now()
if not granularity and is_timeseries:
raise Exception(_(
"Datetime column not provided as part table configuration "
"and is required by this type of chart"))
metrics_exprs = [metrics_dict.get(m).sqla_col for m in metrics]
timeseries_limit_metric = metrics_dict.get(timeseries_limit_metric)
timeseries_limit_metric_expr = None
if timeseries_limit_metric:
timeseries_limit_metric_expr = \
timeseries_limit_metric.sqla_col
if metrics:
main_metric_expr = metrics_exprs[0]
else:
main_metric_expr = literal_column("COUNT(*)").label("ccount")
select_exprs = []
groupby_exprs = []
if groupby:
select_exprs = []
inner_select_exprs = []
inner_groupby_exprs = []
for s in groupby:
col = cols[s]
outer = col.sqla_col
inner = col.sqla_col.label(col.column_name + '__')
groupby_exprs.append(outer)
select_exprs.append(outer)
inner_groupby_exprs.append(inner)
inner_select_exprs.append(inner)
elif columns:
for s in columns:
select_exprs.append(cols[s].sqla_col)
metrics_exprs = []
if granularity:
# TODO: sqlalchemy 1.2 release should be doing this on its own.
# Patch only if the column clause is specific for DateTime set and
# granularity is selected.
@compiles(ColumnClause)
def visit_column(element, compiler, **kw):
text = compiler.visit_column(element, **kw)
try:
if element.is_literal and hasattr(element.type, 'python_type') and \
type(element.type) is DateTime:
text = text.replace('%%', '%')
except NotImplementedError:
pass # Some elements raise NotImplementedError for python_type
return text
dttm_col = cols[granularity]
dttm_expr = dttm_col.sqla_col.label('timestamp')
timestamp = dttm_expr
# Transforming time grain into an expression based on configuration
time_grain_sqla = extras.get('time_grain_sqla')
if time_grain_sqla:
db_engine_spec = self.database.db_engine_spec
if dttm_col.python_date_format == 'epoch_s':
dttm_expr = \
db_engine_spec.epoch_to_dttm().format(col=dttm_expr)
elif dttm_col.python_date_format == 'epoch_ms':
dttm_expr = \
db_engine_spec.epoch_ms_to_dttm().format(col=dttm_expr)
udf = self.database.grains_dict().get(time_grain_sqla, '{col}')
timestamp_grain = literal_column(
udf.function.format(col=dttm_expr), type_=DateTime).label('timestamp')
else:
timestamp_grain = timestamp
if is_timeseries:
select_exprs += [timestamp_grain]
groupby_exprs += [timestamp_grain]
outer_from = text(dttm_col.dttm_sql_literal(from_dttm))
outer_to = text(dttm_col.dttm_sql_literal(to_dttm))
time_filter = [
timestamp >= outer_from,
timestamp <= outer_to,
]
inner_time_filter = copy(time_filter)
if inner_from_dttm:
inner_time_filter[0] = timestamp >= text(
dttm_col.dttm_sql_literal(inner_from_dttm))
if inner_to_dttm:
inner_time_filter[1] = timestamp <= text(
dttm_col.dttm_sql_literal(inner_to_dttm))
else:
inner_time_filter = []
select_exprs += metrics_exprs
qry = select(select_exprs)
tbl = table(self.table_name)
if self.schema:
tbl.schema = self.schema
# Supporting arbitrary SQL statements in place of tables
if self.sql:
tbl = TextAsFrom(sqla.text(self.sql), []).alias('expr_qry')
if not columns:
qry = qry.group_by(*groupby_exprs)
where_clause_and = []
having_clause_and = []
for col, op, eq in filter:
col_obj = cols[col]
if op in ('in', 'not in'):
splitted = FillterPattern.split(eq)[1::2]
values = [types.replace("'", '').strip() for types in splitted]
cond = col_obj.sqla_col.in_(values)
if op == 'not in':
cond = ~cond
where_clause_and.append(cond)
if extras:
where = extras.get('where')
if where:
where_clause_and += [wrap_clause_in_parens(
template_processor.process_template(where))]
having = extras.get('having')
if having:
having_clause_and += [wrap_clause_in_parens(
template_processor.process_template(having))]
if granularity:
qry = qry.where(and_(*(time_filter + where_clause_and)))
else:
qry = qry.where(and_(*where_clause_and))
qry = qry.having(and_(*having_clause_and))
if groupby:
qry = qry.order_by(desc(main_metric_expr))
elif orderby:
for col, ascending in orderby:
direction = asc if ascending else desc
qry = qry.order_by(direction(col))
qry = qry.limit(row_limit)
if timeseries_limit and groupby:
            # some SQL dialects require ORDER BY expressions
            # to also appear in the SELECT clause
inner_select_exprs += [main_metric_expr]
subq = select(inner_select_exprs)
subq = subq.select_from(tbl)
subq = subq.where(and_(*(where_clause_and + inner_time_filter)))
subq = subq.group_by(*inner_groupby_exprs)
ob = main_metric_expr
if timeseries_limit_metric_expr is not None:
ob = timeseries_limit_metric_expr
subq = subq.order_by(desc(ob))
subq = subq.limit(timeseries_limit)
on_clause = []
for i, gb in enumerate(groupby):
on_clause.append(
groupby_exprs[i] == column(gb + '__'))
tbl = tbl.join(subq.alias(), and_(*on_clause))
qry = qry.select_from(tbl)
engine = self.database.get_sqla_engine()
sql = "{}".format(
qry.compile(
engine, compile_kwargs={"literal_binds": True},),
)
df = pd.read_sql_query(
sql=sql,
con=engine
)
sql = sqlparse.format(sql, reindent=True)
return QueryResult(
df=df, duration=datetime.now() - qry_start_dttm, query=sql)
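    # Illustrative usage sketch of query() above (column and metric names are
    # assumptions for the example):
    # res = tbl.query(
    #     groupby=['gender'], metrics=['sum__num'], granularity='ds',
    #     from_dttm=datetime(2016, 1, 1), to_dttm=datetime(2017, 1, 1),
    #     filter=[], is_timeseries=False, row_limit=50, extras={})
    # res.df (pandas DataFrame), res.query (generated SQL), res.duration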
def get_sqla_table_object(self):
return self.database.get_table(self.table_name, schema=self.schema)
def fetch_metadata(self):
"""Fetches the metadata for the table and merges it in"""
try:
table = self.get_sqla_table_object()
except Exception:
raise Exception(
"Table doesn't seem to exist in the specified database, "
"couldn't fetch column information")
TC = TableColumn # noqa shortcut to class
M = SqlMetric # noqa
metrics = []
any_date_col = None
for col in table.columns:
try:
datatype = "{}".format(col.type).upper()
except Exception as e:
datatype = "UNKNOWN"
logging.error(
"Unrecognized data type in {}.{}".format(table, col.name))
logging.exception(e)
dbcol = (
db.session
.query(TC)
.filter(TC.table == self)
.filter(TC.column_name == col.name)
.first()
)
db.session.flush()
if not dbcol:
dbcol = TableColumn(column_name=col.name, type=datatype)
dbcol.groupby = dbcol.is_string
dbcol.filterable = dbcol.is_string
dbcol.sum = dbcol.isnum
dbcol.avg = dbcol.isnum
dbcol.is_dttm = dbcol.is_time
db.session.merge(self)
self.columns.append(dbcol)
if not any_date_col and dbcol.is_time:
any_date_col = col.name
quoted = "{}".format(
column(dbcol.column_name).compile(dialect=db.engine.dialect))
if dbcol.sum:
metrics.append(M(
metric_name='sum__' + dbcol.column_name,
verbose_name='sum__' + dbcol.column_name,
metric_type='sum',
expression="SUM({})".format(quoted)
))
if dbcol.avg:
metrics.append(M(
metric_name='avg__' + dbcol.column_name,
verbose_name='avg__' + dbcol.column_name,
metric_type='avg',
expression="AVG({})".format(quoted)
))
if dbcol.max:
metrics.append(M(
metric_name='max__' + dbcol.column_name,
verbose_name='max__' + dbcol.column_name,
metric_type='max',
expression="MAX({})".format(quoted)
))
if dbcol.min:
metrics.append(M(
metric_name='min__' + dbcol.column_name,
verbose_name='min__' + dbcol.column_name,
metric_type='min',
expression="MIN({})".format(quoted)
))
if dbcol.count_distinct:
metrics.append(M(
metric_name='count_distinct__' + dbcol.column_name,
verbose_name='count_distinct__' + dbcol.column_name,
metric_type='count_distinct',
expression="COUNT(DISTINCT {})".format(quoted)
))
dbcol.type = datatype
db.session.merge(self)
db.session.commit()
metrics.append(M(
metric_name='count',
verbose_name='COUNT(*)',
metric_type='count',
expression="COUNT(*)"
))
for metric in metrics:
m = (
db.session.query(M)
.filter(M.metric_name == metric.metric_name)
.filter(M.table_id == self.id)
.first()
)
metric.table_id = self.id
if not m:
db.session.add(metric)
db.session.commit()
if not self.main_dttm_col:
self.main_dttm_col = any_date_col
@classmethod
def import_obj(cls, datasource_to_import, import_time=None):
"""Imports the datasource from the object to the database.
Metrics, columns and the datasource will be overridden if they already exist.
This function can be used to import/export dashboards between multiple
caravel instances. Audit metadata isn't copied over.
"""
session = db.session
make_transient(datasource_to_import)
logging.info('Started import of the datasource: {}'
.format(datasource_to_import.to_json()))
datasource_to_import.id = None
database_name = datasource_to_import.params_dict['database_name']
datasource_to_import.database_id = session.query(Database).filter_by(
database_name=database_name).one().id
datasource_to_import.alter_params(import_time=import_time)
# override the datasource
datasource = (
session.query(SqlaTable).join(Database)
.filter(
SqlaTable.table_name == datasource_to_import.table_name,
SqlaTable.schema == datasource_to_import.schema,
Database.id == datasource_to_import.database_id,
)
.first()
)
if datasource:
datasource.override(datasource_to_import)
session.flush()
else:
datasource = datasource_to_import.copy()
session.add(datasource)
session.flush()
for m in datasource_to_import.metrics:
new_m = m.copy()
new_m.table_id = datasource.id
logging.info('Importing metric {} from the datasource: {}'.format(
new_m.to_json(), datasource_to_import.full_name))
imported_m = SqlMetric.import_obj(new_m)
if imported_m not in datasource.metrics:
datasource.metrics.append(imported_m)
for c in datasource_to_import.columns:
new_c = c.copy()
new_c.table_id = datasource.id
logging.info('Importing column {} from the datasource: {}'.format(
new_c.to_json(), datasource_to_import.full_name))
imported_c = TableColumn.import_obj(new_c)
if imported_c not in datasource.columns:
datasource.columns.append(imported_c)
db.session.flush()
return datasource.id
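# Illustrative usage sketch (assumption, not part of the original module): a datasource
# exported from another Caravel instance is typically re-imported via the classmethod
# above, e.g.
#
#   >>> new_id = SqlaTable.import_obj(exported_table, import_time=1470000000)
#
# where `exported_table` is a hypothetical deserialized SqlaTable object; its metrics and
# columns are merged by name into the matching table, as implemented above.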
class SqlMetric(Model, AuditMixinNullable, ImportMixin):
"""ORM object for metrics, each table can have multiple metrics"""
__tablename__ = 'sql_metrics'
id = Column(Integer, primary_key=True)
metric_name = Column(String(512))
verbose_name = Column(String(1024))
metric_type = Column(String(32))
table_id = Column(Integer, ForeignKey('tables.id'))
table = relationship(
'SqlaTable',
backref=backref('metrics', cascade='all, delete-orphan'),
foreign_keys=[table_id])
expression = Column(Text)
description = Column(Text)
is_restricted = Column(Boolean, default=False, nullable=True)
d3format = Column(String(128))
export_fields = (
'metric_name', 'verbose_name', 'metric_type', 'table_id', 'expression',
'description', 'is_restricted', 'd3format')
@property
def sqla_col(self):
name = self.metric_name
return literal_column(self.expression).label(name)
@property
def perm(self):
return (
"{parent_name}.[{obj.metric_name}](id:{obj.id})"
).format(obj=self,
parent_name=self.table.full_name) if self.table else None
@classmethod
def import_obj(cls, metric_to_import):
session = db.session
make_transient(metric_to_import)
metric_to_import.id = None
# find if the metric was already imported
existing_metric = session.query(SqlMetric).filter(
SqlMetric.table_id == metric_to_import.table_id,
SqlMetric.metric_name == metric_to_import.metric_name).first()
metric_to_import.table = None
if existing_metric:
existing_metric.override(metric_to_import)
session.flush()
return existing_metric
session.add(metric_to_import)
session.flush()
return metric_to_import
class TableColumn(Model, AuditMixinNullable, ImportMixin):
"""ORM object for table columns, each table can have multiple columns"""
__tablename__ = 'table_columns'
id = Column(Integer, primary_key=True)
table_id = Column(Integer, ForeignKey('tables.id'))
table = relationship(
'SqlaTable',
backref=backref('columns', cascade='all, delete-orphan'),
foreign_keys=[table_id])
column_name = Column(String(255))
verbose_name = Column(String(1024))
is_dttm = Column(Boolean, default=False)
is_active = Column(Boolean, default=True)
type = Column(String(32), default='')
groupby = Column(Boolean, default=False)
count_distinct = Column(Boolean, default=False)
sum = Column(Boolean, default=False)
avg = Column(Boolean, default=False)
max = Column(Boolean, default=False)
min = Column(Boolean, default=False)
filterable = Column(Boolean, default=False)
expression = Column(Text, default='')
description = Column(Text, default='')
python_date_format = Column(String(255))
database_expression = Column(String(255))
num_types = ('DOUBLE', 'FLOAT', 'INT', 'BIGINT', 'LONG')
date_types = ('DATE', 'TIME')
str_types = ('VARCHAR', 'STRING', 'CHAR')
export_fields = (
'table_id', 'column_name', 'verbose_name', 'is_dttm', 'is_active',
'type', 'groupby', 'count_distinct', 'sum', 'avg', 'max', 'min',
'filterable', 'expression', 'description', 'python_date_format',
'database_expression'
)
def __repr__(self):
return self.column_name
@property
def isnum(self):
return any([t in self.type.upper() for t in self.num_types])
@property
def is_time(self):
return any([t in self.type.upper() for t in self.date_types])
@property
def is_string(self):
return any([t in self.type.upper() for t in self.str_types])
@property
def sqla_col(self):
name = self.column_name
if not self.expression:
col = column(self.column_name).label(name)
else:
col = literal_column(self.expression).label(name)
return col
@classmethod
def import_obj(cls, column_to_import):
session = db.session
make_transient(column_to_import)
column_to_import.id = None
column_to_import.table = None
# find if the column was already imported
existing_column = session.query(TableColumn).filter(
TableColumn.table_id == column_to_import.table_id,
TableColumn.column_name == column_to_import.column_name).first()
if existing_column:
existing_column.override(column_to_import)
session.flush()
return existing_column
session.add(column_to_import)
session.flush()
return column_to_import
def dttm_sql_literal(self, dttm):
"""Convert datetime object to a SQL expression string
If database_expression is empty, the internal dttm
will be formatted using the pattern the user provided
(python_date_format).
If database_expression is not empty, the internal dttm
will be interpolated into that SQL expression for the database to convert.
"""
tf = self.python_date_format or '%Y-%m-%d %H:%M:%S.%f'
if self.database_expression:
return self.database_expression.format(dttm.strftime('%Y-%m-%d %H:%M:%S'))
elif tf == 'epoch_s':
return str((dttm - datetime(1970, 1, 1)).total_seconds())
elif tf == 'epoch_ms':
return str((dttm - datetime(1970, 1, 1)).total_seconds() * 1000.0)
else:
s = self.table.database.db_engine_spec.convert_dttm(
self.type, dttm)
return s or "'{}'".format(dttm.strftime(tf))
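# Illustrative examples (assumption, added for clarity; not part of the original module):
# for a column with python_date_format='epoch_s',
#   >>> col.dttm_sql_literal(datetime(2016, 1, 1))
#   '1451606400.0'
# with 'epoch_ms' the same call yields '1451606400000.0', and with a database_expression
# such as "TO_DATE('{}', 'YYYY-MM-DD HH24:MI:SS')" (hypothetical) the dttm is interpolated
# into that expression instead of being quoted.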
class DruidCluster(Model, AuditMixinNullable):
"""ORM object referencing the Druid clusters"""
__tablename__ = 'clusters'
id = Column(Integer, primary_key=True)
cluster_name = Column(String(250), unique=True)
coordinator_host = Column(String(255))
coordinator_port = Column(Integer)
coordinator_endpoint = Column(
String(255), default='druid/coordinator/v1/metadata')
broker_host = Column(String(255))
broker_port = Column(Integer)
broker_endpoint = Column(String(255), default='druid/v2')
metadata_last_refreshed = Column(DateTime)
cache_timeout = Column(Integer)
def __repr__(self):
return self.cluster_name
def get_pydruid_client(self):
cli = PyDruid(
"http://{0}:{1}/".format(self.broker_host, self.broker_port),
self.broker_endpoint)
return cli
def get_datasources(self):
endpoint = (
"http://{obj.coordinator_host}:{obj.coordinator_port}/"
"{obj.coordinator_endpoint}/datasources"
).format(obj=self)
return json.loads(requests.get(endpoint).text)
def get_druid_version(self):
endpoint = (
"http://{obj.coordinator_host}:{obj.coordinator_port}/status"
).format(obj=self)
return json.loads(requests.get(endpoint).text)['version']
def refresh_datasources(self):
self.druid_version = self.get_druid_version()
for datasource in self.get_datasources():
if datasource not in config.get('DRUID_DATA_SOURCE_BLACKLIST'):
DruidDatasource.sync_to_db(datasource, self)
@property
def perm(self):
return "[{obj.cluster_name}].(id:{obj.id})".format(obj=self)
@property
def name(self):
return self.cluster_name
class DruidDatasource(Model, AuditMixinNullable, Queryable):
"""ORM object referencing Druid datasources (tables)"""
type = "druid"
baselink = "druiddatasourcemodelview"
__tablename__ = 'datasources'
id = Column(Integer, primary_key=True)
datasource_name = Column(String(255), unique=True)
is_featured = Column(Boolean, default=False)
is_hidden = Column(Boolean, default=False)
description = Column(Text)
default_endpoint = Column(Text)
user_id = Column(Integer, ForeignKey('ab_user.id'))
owner = relationship(
'User',
backref=backref('datasources', cascade='all, delete-orphan'),
foreign_keys=[user_id])
cluster_name = Column(
String(250), ForeignKey('clusters.cluster_name'))
cluster = relationship(
'DruidCluster', backref='datasources', foreign_keys=[cluster_name])
offset = Column(Integer, default=0)
cache_timeout = Column(Integer)
@property
def database(self):
return self.cluster
@property
def metrics_combo(self):
return sorted(
[(m.metric_name, m.verbose_name) for m in self.metrics],
key=lambda x: x[1])
@property
def num_cols(self):
return [c.column_name for c in self.columns if c.isnum]
@property
def name(self):
return self.datasource_name
@property
def perm(self):
return (
"[{obj.cluster_name}].[{obj.datasource_name}]"
"(id:{obj.id})").format(obj=self)
@property
def link(self):
name = escape(self.datasource_name)
return Markup('<a href="{self.url}">{name}</a>').format(**locals())
@property
def full_name(self):
return utils.get_datasource_full_name(
self.cluster_name, self.datasource_name)
@property
def time_column_grains(self):
return {
"time_columns": [
'all', '5 seconds', '30 seconds', '1 minute',
'5 minutes', '1 hour', '6 hour', '1 day', '7 days',
'week', 'week_starting_sunday', 'week_ending_saturday',
'month',
],
"time_grains": ['now']
}
def __repr__(self):
return self.datasource_name
@renders('datasource_name')
def datasource_link(self):
url = "/caravel/explore/{obj.type}/{obj.id}/".format(obj=self)
name = escape(self.datasource_name)
return Markup('<a href="{url}">{name}</a>'.format(**locals()))
def get_metric_obj(self, metric_name):
return [
m.json_obj for m in self.metrics
if m.metric_name == metric_name
][0]
@staticmethod
def version_higher(v1, v2):
"""is v1 higher than v2
>>> DruidDatasource.version_higher('0.8.2', '0.9.1')
False
>>> DruidDatasource.version_higher('0.8.2', '0.6.1')
True
>>> DruidDatasource.version_higher('0.8.2', '0.8.2')
False
>>> DruidDatasource.version_higher('0.8.2', '0.9.BETA')
False
>>> DruidDatasource.version_higher('0.8.2', '0.9')
False
"""
def int_or_0(v):
try:
v = int(v)
except (TypeError, ValueError):
v = 0
return v
v1nums = [int_or_0(n) for n in v1.split('.')]
v2nums = [int_or_0(n) for n in v2.split('.')]
v1nums = (v1nums + [0, 0, 0])[:3]
v2nums = (v2nums + [0, 0, 0])[:3]
return v1nums[0] > v2nums[0] or \
(v1nums[0] == v2nums[0] and v1nums[1] > v2nums[1]) or \
(v1nums[0] == v2nums[0] and v1nums[1] == v2nums[1] and v1nums[2] > v2nums[2])
def latest_metadata(self):
"""Returns segment metadata from the latest segment"""
client = self.cluster.get_pydruid_client()
results = client.time_boundary(datasource=self.datasource_name)
if not results:
return
max_time = results[0]['result']['maxTime']
max_time = parse(max_time)
# Query segmentMetadata for 7 days back. However, due to a bug,
# we need to set this interval to more than 1 day ago to exclude
# realtime segments, which triggered a bug (fixed in Druid 0.8.2).
# https://groups.google.com/forum/#!topic/druid-user/gVCqqspHqOQ
start = (0 if self.version_higher(self.cluster.druid_version, '0.8.2') else 1)
intervals = (max_time - timedelta(days=7)).isoformat() + '/'
intervals += (max_time - timedelta(days=start)).isoformat()
segment_metadata = client.segment_metadata(
datasource=self.datasource_name,
intervals=intervals)
if segment_metadata:
return segment_metadata[-1]['columns']
def generate_metrics(self):
for col in self.columns:
col.generate_metrics()
@classmethod
def sync_to_db_from_config(cls, druid_config, user, cluster):
"""Merges the ds config from druid_config into one stored in the db."""
session = db.session()
datasource = (
session.query(DruidDatasource)
.filter_by(
datasource_name=druid_config['name'])
).first()
# Create a new datasource.
if not datasource:
datasource = DruidDatasource(
datasource_name=druid_config['name'],
cluster=cluster,
owner=user,
changed_by_fk=user.id,
created_by_fk=user.id,
)
session.add(datasource)
dimensions = druid_config['dimensions']
for dim in dimensions:
col_obj = (
session.query(DruidColumn)
.filter_by(
datasource_name=druid_config['name'],
column_name=dim)
).first()
if not col_obj:
col_obj = DruidColumn(
datasource_name=druid_config['name'],
column_name=dim,
groupby=True,
filterable=True,
# TODO: fetch type from Hive.
type="STRING",
datasource=datasource
)
session.add(col_obj)
# Import Druid metrics
for metric_spec in druid_config["metrics_spec"]:
metric_name = metric_spec["name"]
metric_type = metric_spec["type"]
metric_json = json.dumps(metric_spec)
if metric_type == "count":
metric_type = "longSum"
metric_json = json.dumps({
"type": "longSum",
"name": metric_name,
"fieldName": metric_name,
})
metric_obj = (
session.query(DruidMetric)
.filter_by(
datasource_name=druid_config['name'],
metric_name=metric_name)
).first()
if not metric_obj:
metric_obj = DruidMetric(
metric_name=metric_name,
metric_type=metric_type,
verbose_name="%s(%s)" % (metric_type, metric_name),
datasource=datasource,
json=metric_json,
description=(
"Imported from the airolap config dir for %s" %
druid_config['name']),
)
session.add(metric_obj)
session.commit()
@classmethod
def sync_to_db(cls, name, cluster):
"""Fetches metadata for that datasource and merges the Caravel db"""
logging.info("Syncing Druid datasource [{}]".format(name))
session = get_session()
datasource = session.query(cls).filter_by(datasource_name=name).first()
if not datasource:
datasource = cls(datasource_name=name)
session.add(datasource)
flasher("Adding new datasource [{}]".format(name), "success")
else:
flasher("Refreshing datasource [{}]".format(name), "info")
session.flush()
datasource.cluster = cluster
session.flush()
cols = datasource.latest_metadata()
if not cols:
return
for col in cols:
col_obj = (
session
.query(DruidColumn)
.filter_by(datasource_name=name, column_name=col)
.first()
)
datatype = cols[col]['type']
if not col_obj:
col_obj = DruidColumn(datasource_name=name, column_name=col)
session.add(col_obj)
if datatype == "STRING":
col_obj.groupby = True
col_obj.filterable = True
if datatype == "hyperUnique" or datatype == "thetaSketch":
col_obj.count_distinct = True
if col_obj:
col_obj.type = cols[col]['type']
session.flush()
col_obj.datasource = datasource
col_obj.generate_metrics()
session.flush()
@staticmethod
def time_offset(granularity):
if granularity == 'week_ending_saturday':
return 6 * 24 * 3600 * 1000 # 6 days
return 0
# uses https://en.wikipedia.org/wiki/ISO_8601
# http://druid.io/docs/0.8.0/querying/granularities.html
# TODO: pass origin from the UI
@staticmethod
def granularity(period_name, timezone=None):
if not period_name or period_name == 'all':
return 'all'
iso_8601_dict = {
'5 seconds': 'PT5S',
'30 seconds': 'PT30S',
'1 minute': 'PT1M',
'5 minutes': 'PT5M',
'1 hour': 'PT1H',
'6 hour': 'PT6H',
'one day': 'P1D',
'1 day': 'P1D',
'7 days': 'P7D',
'week': 'P1W',
'week_starting_sunday': 'P1W',
'week_ending_saturday': 'P1W',
'month': 'P1M',
}
granularity = {'type': 'period'}
if timezone:
granularity['timezone'] = timezone
if period_name in iso_8601_dict:
granularity['period'] = iso_8601_dict[period_name]
if period_name in ('week_ending_saturday', 'week_starting_sunday'):
# use Sunday as start of the week
granularity['origin'] = '2016-01-03T00:00:00'
elif not isinstance(period_name, string_types):
granularity['type'] = 'duration'
granularity['duration'] = period_name
elif period_name.startswith('P'):
# identify if the string is the iso_8601 period
granularity['period'] = period_name
else:
granularity['type'] = 'duration'
granularity['duration'] = utils.parse_human_timedelta(
period_name).total_seconds() * 1000
return granularity
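# Illustrative sketch (assumption, added for clarity): the mapping above turns UI period
# names into Druid granularity specs, e.g.
#   >>> DruidDatasource.granularity('1 hour', timezone='UTC')
#   {'type': 'period', 'timezone': 'UTC', 'period': 'PT1H'}
#   >>> DruidDatasource.granularity('week_ending_saturday')
#   {'type': 'period', 'period': 'P1W', 'origin': '2016-01-03T00:00:00'}
# while any other free-form string falls through to utils.parse_human_timedelta and
# produces a 'duration' spec expressed in milliseconds.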
def query( # druid
self, groupby, metrics,
granularity,
from_dttm, to_dttm,
filter=None, # noqa
is_timeseries=True,
timeseries_limit=None,
timeseries_limit_metric=None,
row_limit=None,
inner_from_dttm=None, inner_to_dttm=None,
orderby=None,
extras=None, # noqa
select=None, # noqa
columns=None, ):
"""Runs a query against Druid and returns a dataframe.
This query interface is common to SqlAlchemy and Druid
"""
# TODO refactor into using a TBD Query object
qry_start_dttm = datetime.now()
inner_from_dttm = inner_from_dttm or from_dttm
inner_to_dttm = inner_to_dttm or to_dttm
# add tzinfo to native datetime with config
from_dttm = from_dttm.replace(tzinfo=config.get("DRUID_TZ"))
to_dttm = to_dttm.replace(tzinfo=config.get("DRUID_TZ"))
timezone = from_dttm.tzname()
query_str = ""
metrics_dict = {m.metric_name: m for m in self.metrics}
all_metrics = []
post_aggs = {}
def recursive_get_fields(_conf):
_fields = _conf.get('fields', [])
field_names = []
for _f in _fields:
_type = _f.get('type')
if _type in ['fieldAccess', 'hyperUniqueCardinality']:
field_names.append(_f.get('fieldName'))
elif _type == 'arithmetic':
field_names += recursive_get_fields(_f)
return list(set(field_names))
for metric_name in metrics:
metric = metrics_dict[metric_name]
if metric.metric_type != 'postagg':
all_metrics.append(metric_name)
else:
conf = metric.json_obj
all_metrics += recursive_get_fields(conf)
all_metrics += conf.get('fieldNames', [])
if conf.get('type') == 'javascript':
post_aggs[metric_name] = JavascriptPostAggregator(
name=conf.get('name'),
field_names=conf.get('fieldNames'),
function=conf.get('function'))
else:
post_aggs[metric_name] = Postaggregator(
conf.get('fn', "/"),
conf.get('fields', []),
conf.get('name', ''))
aggregations = OrderedDict()
for m in self.metrics:
if m.metric_name in all_metrics:
aggregations[m.metric_name] = m.json_obj
rejected_metrics = [
m.metric_name for m in self.metrics
if m.is_restricted and
m.metric_name in aggregations.keys() and
not sm.has_access('metric_access', m.perm)
]
if rejected_metrics:
raise MetricPermException(
"Access to the metrics denied: " + ', '.join(rejected_metrics)
)
qry = dict(
datasource=self.datasource_name,
dimensions=groupby,
aggregations=aggregations,
granularity=DruidDatasource.granularity(
granularity, timezone=timezone),
post_aggregations=post_aggs,
intervals=from_dttm.isoformat() + '/' + to_dttm.isoformat(),
)
filters = self.get_filters(filter)
if filters:
qry['filter'] = filters
having_filters = self.get_having_filters(extras.get('having_druid'))
if having_filters:
qry['having'] = having_filters
client = self.cluster.get_pydruid_client()
orig_filters = filters
if len(groupby) == 0:
del qry['dimensions']
client.timeseries(**qry)
if len(groupby) == 1:
if not timeseries_limit:
timeseries_limit = 10000
qry['threshold'] = timeseries_limit
qry['dimension'] = qry.get('dimensions')[0]
del qry['dimensions']
qry['metric'] = list(qry['aggregations'].keys())[0]
client.topn(**qry)
elif len(groupby) > 1:
if timeseries_limit and is_timeseries:
order_by = metrics[0] if metrics else self.metrics[0]
if timeseries_limit_metric:
order_by = timeseries_limit_metric
# Limit the number of timeseries by doing a two-phase query
pre_qry = deepcopy(qry)
pre_qry['granularity'] = "all"
pre_qry['limit_spec'] = {
"type": "default",
"limit": timeseries_limit,
'intervals': (
inner_from_dttm.isoformat() + '/' +
inner_to_dttm.isoformat()),
"columns": [{
"dimension": order_by,
"direction": "descending",
}],
}
client.groupby(**pre_qry)
query_str += "// Two phase query\n// Phase 1\n"
query_str += json.dumps(
client.query_builder.last_query.query_dict, indent=2)
query_str += "\n"
query_str += (
"//\nPhase 2 (built based on phase one's results)\n")
df = client.export_pandas()
if df is not None and not df.empty:
dims = qry['dimensions']
filters = []
for unused, row in df.iterrows():
fields = []
for dim in dims:
f = Dimension(dim) == row[dim]
fields.append(f)
if len(fields) > 1:
filt = Filter(type="and", fields=fields)
filters.append(filt)
elif fields:
filters.append(fields[0])
if filters:
ff = Filter(type="or", fields=filters)
if not orig_filters:
qry['filter'] = ff
else:
qry['filter'] = Filter(type="and", fields=[
ff,
orig_filters])
qry['limit_spec'] = None
if row_limit:
qry['limit_spec'] = {
"type": "default",
"limit": row_limit,
"columns": [{
"dimension": (
metrics[0] if metrics else self.metrics[0]),
"direction": "descending",
}],
}
client.groupby(**qry)
query_str += json.dumps(
client.query_builder.last_query.query_dict, indent=2)
df = client.export_pandas()
if df is None or df.size == 0:
raise Exception(_("No data was returned."))
if (
not is_timeseries and
granularity == "all" and
'timestamp' in df.columns):
del df['timestamp']
# Reordering columns
cols = []
if 'timestamp' in df.columns:
cols += ['timestamp']
cols += [col for col in groupby if col in df.columns]
cols += [col for col in metrics if col in df.columns]
df = df[cols]
time_offset = DruidDatasource.time_offset(granularity)
def increment_timestamp(ts):
dt = utils.parse_human_datetime(ts).replace(
tzinfo=config.get("DRUID_TZ"))
return dt + timedelta(milliseconds=time_offset)
if 'timestamp' in df.columns and time_offset:
df.timestamp = df.timestamp.apply(increment_timestamp)
return QueryResult(
df=df,
query=query_str,
duration=datetime.now() - qry_start_dttm)
@staticmethod
def get_filters(raw_filters):
filters = None
for col, op, eq in raw_filters:
cond = None
if op == '==':
cond = Dimension(col) == eq
elif op == '!=':
cond = ~(Dimension(col) == eq)
elif op in ('in', 'not in'):
fields = []
# Distinguish quoted values from regular value types
splitted = FillterPattern.split(eq)[1::2]
values = [types.replace("'", '') for types in splitted]
if len(values) > 1:
for s in values:
s = s.strip()
fields.append(Dimension(col) == s)
cond = Filter(type="or", fields=fields)
else:
cond = Dimension(col) == eq
if op == 'not in':
cond = ~cond
elif op == 'regex':
cond = Filter(type="regex", pattern=eq, dimension=col)
if filters:
filters = Filter(type="and", fields=[
cond,
filters
])
else:
filters = cond
return filters
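# Illustrative sketch (assumption, added for clarity): a raw filter list such as
#   [('country', 'in', "'US','FR'"), ('gender', '==', 'male')]
# is folded into nested pydruid Filter objects, each new condition AND-ed with the
# filters accumulated so far, roughly:
#   Filter(type='and', fields=[
#       Dimension('gender') == 'male',
#       Filter(type='or', fields=[Dimension('country') == 'US',
#                                 Dimension('country') == 'FR'])])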
def _get_having_obj(self, col, op, eq):
cond = None
if op == '==':
if col in self.column_names:
cond = DimSelector(dimension=col, value=eq)
else:
cond = Aggregation(col) == eq
elif op == '>':
cond = Aggregation(col) > eq
elif op == '<':
cond = Aggregation(col) < eq
return cond
def get_having_filters(self, raw_filters):
filters = None
reversed_op_map = {
'!=': '==',
'>=': '<',
'<=': '>'
}
for col, op, eq in raw_filters:
cond = None
if op in ['==', '>', '<']:
cond = self._get_having_obj(col, op, eq)
elif op in reversed_op_map:
cond = ~self._get_having_obj(col, reversed_op_map[op], eq)
if filters:
filters = filters & cond
else:
filters = cond
return filters
class Log(Model):
"""ORM object used to log Caravel actions to the database"""
__tablename__ = 'logs'
id = Column(Integer, primary_key=True)
action = Column(String(512))
user_id = Column(Integer, ForeignKey('ab_user.id'))
dashboard_id = Column(Integer)
slice_id = Column(Integer)
json = Column(Text)
user = relationship('User', backref='logs', foreign_keys=[user_id])
dttm = Column(DateTime, default=func.now())
dt = Column(Date, default=date.today())
@classmethod
def log_this(cls, f):
"""Decorator to log user actions"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
user_id = None
if g.user:
user_id = g.user.get_id()
d = request.args.to_dict()
d.update(kwargs)
slice_id = d.get('slice_id', 0)
try:
slice_id = int(slice_id) if slice_id else 0
except ValueError:
slice_id = 0
params = ""
try:
params = json.dumps(d)
except:
pass
log = cls(
action=f.__name__,
json=params,
dashboard_id=d.get('dashboard_id') or None,
slice_id=slice_id,
user_id=user_id)
db.session.add(log)
db.session.commit()
return f(*args, **kwargs)
return wrapper
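# Illustrative usage sketch (assumption, not part of the original module): view methods
# are wrapped with the decorator above, e.g.
#
#   @Log.log_this
#   def explore(self, datasource_type, datasource_id):
#       ...
#
# so that every call appends a Log row with the action name (f.__name__), the user id,
# the slice/dashboard ids and the request arguments serialized as JSON.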
class DruidMetric(Model, AuditMixinNullable):
"""ORM object referencing Druid metrics for a datasource"""
__tablename__ = 'metrics'
id = Column(Integer, primary_key=True)
metric_name = Column(String(512))
verbose_name = Column(String(1024))
metric_type = Column(String(32))
datasource_name = Column(
String(255),
ForeignKey('datasources.datasource_name'))
# Setting enable_typechecks=False disables polymorphic inheritance.
datasource = relationship(
'DruidDatasource',
backref=backref('metrics', cascade='all, delete-orphan'),
enable_typechecks=False)
json = Column(Text)
description = Column(Text)
is_restricted = Column(Boolean, default=False, nullable=True)
d3format = Column(String(128))
@property
def json_obj(self):
try:
obj = json.loads(self.json)
except Exception:
obj = {}
return obj
@property
def perm(self):
return (
"{parent_name}.[{obj.metric_name}](id:{obj.id})"
).format(obj=self,
parent_name=self.datasource.full_name
) if self.datasource else None
class DruidColumn(Model, AuditMixinNullable):
"""ORM model for storing Druid datasource column metadata"""
__tablename__ = 'columns'
id = Column(Integer, primary_key=True)
datasource_name = Column(
String(255),
ForeignKey('datasources.datasource_name'))
# Setting enable_typechecks=False disables polymorphic inheritance.
datasource = relationship(
'DruidDatasource',
backref=backref('columns', cascade='all, delete-orphan'),
enable_typechecks=False)
column_name = Column(String(255))
is_active = Column(Boolean, default=True)
type = Column(String(32))
groupby = Column(Boolean, default=False)
count_distinct = Column(Boolean, default=False)
sum = Column(Boolean, default=False)
avg = Column(Boolean, default=False)
max = Column(Boolean, default=False)
min = Column(Boolean, default=False)
filterable = Column(Boolean, default=False)
description = Column(Text)
def __repr__(self):
return self.column_name
@property
def isnum(self):
return self.type in ('LONG', 'DOUBLE', 'FLOAT', 'INT')
def generate_metrics(self):
"""Generate metrics based on the column metadata"""
M = DruidMetric # noqa
metrics = []
metrics.append(DruidMetric(
metric_name='count',
verbose_name='COUNT(*)',
metric_type='count',
json=json.dumps({'type': 'count', 'name': 'count'})
))
# Somehow we need to reassign this for UDAFs
if self.type in ('DOUBLE', 'FLOAT'):
corrected_type = 'DOUBLE'
else:
corrected_type = self.type
if self.sum and self.isnum:
mt = corrected_type.lower() + 'Sum'
name = 'sum__' + self.column_name
metrics.append(DruidMetric(
metric_name=name,
metric_type='sum',
verbose_name='SUM({})'.format(self.column_name),
json=json.dumps({
'type': mt, 'name': name, 'fieldName': self.column_name})
))
if self.avg and self.isnum:
mt = corrected_type.lower() + 'Avg'
name = 'avg__' + self.column_name
metrics.append(DruidMetric(
metric_name=name,
metric_type='avg',
verbose_name='AVG({})'.format(self.column_name),
json=json.dumps({
'type': mt, 'name': name, 'fieldName': self.column_name})
))
if self.min and self.isnum:
mt = corrected_type.lower() + 'Min'
name = 'min__' + self.column_name
metrics.append(DruidMetric(
metric_name=name,
metric_type='min',
verbose_name='MIN({})'.format(self.column_name),
json=json.dumps({
'type': mt, 'name': name, 'fieldName': self.column_name})
))
if self.max and self.isnum:
mt = corrected_type.lower() + 'Max'
name = 'max__' + self.column_name
metrics.append(DruidMetric(
metric_name=name,
metric_type='max',
verbose_name='MAX({})'.format(self.column_name),
json=json.dumps({
'type': mt, 'name': name, 'fieldName': self.column_name})
))
if self.count_distinct:
name = 'count_distinct__' + self.column_name
if self.type == 'hyperUnique' or self.type == 'thetaSketch':
metrics.append(DruidMetric(
metric_name=name,
verbose_name='COUNT(DISTINCT {})'.format(self.column_name),
metric_type=self.type,
json=json.dumps({
'type': self.type,
'name': name,
'fieldName': self.column_name
})
))
else:
mt = 'count_distinct'
metrics.append(DruidMetric(
metric_name=name,
verbose_name='COUNT(DISTINCT {})'.format(self.column_name),
metric_type='count_distinct',
json=json.dumps({
'type': 'cardinality',
'name': name,
'fieldNames': [self.column_name]})
))
session = get_session()
new_metrics = []
for metric in metrics:
m = (
session.query(M)
.filter(M.metric_name == metric.metric_name)
.filter(M.datasource_name == self.datasource_name)
.filter(DruidCluster.cluster_name == self.datasource.cluster_name)
.first()
)
metric.datasource_name = self.datasource_name
if not m:
new_metrics.append(metric)
session.add(metric)
session.flush()
utils.init_metrics_perm(caravel, new_metrics)
class FavStar(Model):
__tablename__ = 'favstar'
id = Column(Integer, primary_key=True)
user_id = Column(Integer, ForeignKey('ab_user.id'))
class_name = Column(String(50))
obj_id = Column(Integer)
dttm = Column(DateTime, default=func.now())
class QueryStatus:
CANCELLED = 'cancelled'
FAILED = 'failed'
PENDING = 'pending'
RUNNING = 'running'
SCHEDULED = 'scheduled'
SUCCESS = 'success'
TIMED_OUT = 'timed_out'
class Query(Model):
"""ORM model for SQL query"""
__tablename__ = 'query'
id = Column(Integer, primary_key=True)
client_id = Column(String(11), unique=True, nullable=False)
database_id = Column(Integer, ForeignKey('dbs.id'), nullable=False)
# Store the tmp table into the DB only if the user asks for it.
tmp_table_name = Column(String(256))
user_id = Column(
Integer, ForeignKey('ab_user.id'), nullable=True)
status = Column(String(16), default=QueryStatus.PENDING)
tab_name = Column(String(256))
sql_editor_id = Column(String(256))
schema = Column(String(256))
sql = Column(Text)
# Query to retrieve the results,
# used only in case of select_as_cta_used is true.
select_sql = Column(Text)
executed_sql = Column(Text)
# Could be configured in the caravel config.
limit = Column(Integer)
limit_used = Column(Boolean, default=False)
limit_reached = Column(Boolean, default=False)
select_as_cta = Column(Boolean)
select_as_cta_used = Column(Boolean, default=False)
progress = Column(Integer, default=0) # 1..100
# # of rows in the result set or rows modified.
rows = Column(Integer)
error_message = Column(Text)
# key used to store the results in the results backend
results_key = Column(String(64))
# Using Numeric in place of DateTime for sub-second precision
# stored as seconds since epoch, allowing for milliseconds
start_time = Column(Numeric(precision=3))
end_time = Column(Numeric(precision=3))
changed_on = Column(
DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=True)
database = relationship(
'Database', foreign_keys=[database_id], backref='queries')
user = relationship(
'User',
backref=backref('queries', cascade='all, delete-orphan'),
foreign_keys=[user_id])
__table_args__ = (
sqla.Index('ti_user_id_changed_on', user_id, changed_on),
)
@property
def limit_reached(self):
return self.rows == self.limit if self.limit_used else False
def to_dict(self):
return {
'changedOn': self.changed_on,
'changed_on': self.changed_on.isoformat(),
'dbId': self.database_id,
'db': self.database.database_name,
'endDttm': self.end_time,
'errorMessage': self.error_message,
'executedSql': self.executed_sql,
'id': self.client_id,
'limit': self.limit,
'progress': self.progress,
'rows': self.rows,
'schema': self.schema,
'ctas': self.select_as_cta,
'serverId': self.id,
'sql': self.sql,
'sqlEditorId': self.sql_editor_id,
'startDttm': self.start_time,
'state': self.status.lower(),
'tab': self.tab_name,
'tempTable': self.tmp_table_name,
'userId': self.user_id,
'user': self.user.username,
'limit_reached': self.limit_reached,
'resultsKey': self.results_key,
}
@property
def name(self):
ts = datetime.now().isoformat()
ts = ts.replace('-', '').replace(':', '').split('.')[0]
tab = self.tab_name.replace(' ', '_').lower() if self.tab_name else 'notab'
tab = re.sub(r'\W+', '', tab)
return "sqllab_{tab}_{ts}".format(**locals())
class DatasourceAccessRequest(Model, AuditMixinNullable):
"""ORM model for the access requests for datasources and dbs."""
__tablename__ = 'access_request'
id = Column(Integer, primary_key=True)
datasource_id = Column(Integer)
datasource_type = Column(String(200))
ROLES_BLACKLIST = set(['Admin', 'Alpha', 'Gamma', 'Public'])
@property
def cls_model(self):
return SourceRegistry.sources[self.datasource_type]
@property
def username(self):
return self.creator()
@property
def datasource(self):
return self.get_datasource
@datasource.getter
@utils.memoized
def get_datasource(self):
ds = db.session.query(self.cls_model).filter_by(
id=self.datasource_id).first()
return ds
@property
def datasource_link(self):
return self.datasource.link
@property
def roles_with_datasource(self):
action_list = ''
pv = sm.find_permission_view_menu(
'datasource_access', self.datasource.perm)
for r in pv.role:
if r.name in self.ROLES_BLACKLIST:
continue
url = (
'/caravel/approve?datasource_type={self.datasource_type}&'
'datasource_id={self.datasource_id}&'
'created_by={self.created_by.username}&role_to_grant={r.name}'
.format(**locals())
)
href = '<a href="{}">Grant {} Role</a>'.format(url, r.name)
action_list = action_list + '<li>' + href + '</li>'
return '<ul>' + action_list + '</ul>'
@property
def user_roles(self):
action_list = ''
for r in self.created_by.roles:
url = (
'/caravel/approve?datasource_type={self.datasource_type}&'
'datasource_id={self.datasource_id}&'
'created_by={self.created_by.username}&role_to_extend={r.name}'
.format(**locals())
)
href = '<a href="{}">Extend {} Role</a>'.format(url, r.name)
if r.name in self.ROLES_BLACKLIST:
href = "{} Role".format(r.name)
action_list = action_list + '<li>' + href + '</li>'
return '<ul>' + action_list + '</ul>'
```
|
{
"source": "jerome-colin/mtools",
"score": 2
}
|
#### File: mtools/common/Roi.py
```python
__author__ = "jerome.colin'at'ces<EMAIL>"
__license__ = "MIT"
__version__ = "1.0.3"
import numpy as np
from scipy import stats
import sys
class Roi_collection:
"""
A collection of ROIs defined according to the coordinate file given to roistats
"""
def __init__(self, fname, extent, logger, delimiter=','):
"""
:param fname: the coordinate file
:param extent: in meters
:param logger:
:param delimiter:
"""
self.fname = fname
self.extent = extent
self.logger = logger
self.delimiter = delimiter
self.logger.info("Checking coordinates consistency...")
try:
self.coord_arr = np.loadtxt(self.fname, delimiter=self.delimiter)
self.logger.info("Found %i coordinates pairs" % (len(self.coord_arr)))
if len(self.coord_arr) == 0:
self.logger.error("Coordinates file empty ?")
sys.exit(2)
for c in range(len(self.coord_arr)):
self.logger.debug(self.coord_arr[c])
except ValueError as err:
self.logger.error(err)
self.logger.error("Wrong value in coordinates file (or un-managed header line)")
sys.exit(2)
except FileNotFoundError as err:
self.logger.error(err)
sys.exit(1)
# Extent value consistency check
self.logger.info("Checking extent value consistency...")
if self.extent <= 0:
self.logger.error("Wrong extent given : %i" % self.extent)
sys.exit(2)
def compute_stats_all_bands(self, product, logger, stdout=False, withAOT=False, withVAP=False):
"""
Compute (and optionally print) statistics for all bands of a product, for all ROIs in the collection
:param product: a Product instance
:param logger:
:param stdout: if True, print per-ROI, per-band statistics to stdout
:param withAOT: if True, also compute statistics for the AOT band
:param withVAP: if True, also compute statistics for the VAP band
:return:
"""
# Get the list of bands to compute stats for
bands = product.band_names
if withAOT:
bands.append("AOT.")
if withVAP:
bands.append("VAP.")
list_stats = []
# For each Roi in Roi_collection:
for i in range(len(self.coord_arr)):
# Get an ROI object
roi_n = Roi(self.coord_arr[i], self.extent, logger)
# Get the corresponding mask
clm = product.get_band_subset(product.find_band(product.clm_name), roi=roi_n)
edg = product.get_band_subset(product.find_band(product.edg_name), roi=roi_n)
mask = product.get_mask(clm, edg)
# For each SRE band in product, extract a subset according to ROI and return stats
for band in bands:
# samples, minmax, avg, variance, skewness, kurtosis
band_size = mask.size
stats = self.compute_stats_oneband(roi_n, product, band, mask=mask)
list_stats.append(stats)
if stdout:
if stats is not None:
print("%s, %s, %s, %i, %i, %6.1f%%, %10.8f, %10.8f, %10.8f, %10.8f" %
(product.name, roi_n.id, band[:-1], band_size, stats[0], stats[0]/band_size*100, stats[1][0], stats[1][1], stats[2], stats[3]))
else:
print("%s, %s, %s, no valid pixel in ROI (fully cloudy or out of edge)" % (product.name, roi_n.id, band[:-1]))
return list_stats
def compute_stats_oneband(self, roi, product, band, mask=None):
"""
:param roi: Roi object
:param product: Product object
:param band: a string that helps identify a file
:return:
"""
if band == "AOT.":
subset = product.get_band_subset(product.find_band(product.aot_name), roi=roi, scalef=product.aot_scalef, layer=product.aot_layer)
elif band == "VAP.":
subset = product.get_band_subset(product.find_band(product.vap_name), roi=roi, scalef=product.vap_scalef, layer=product.vap_layer)
else:
subset = product.get_band_subset(product.find_band(band), roi=roi, scalef=product.sre_scalef)
if mask is not None:
search = np.where(mask == 1)
valid_pixels = subset[search]
else:
valid_pixels = subset
try:
return stats.describe(valid_pixels, axis=None)
except ValueError:
return None
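# Illustrative usage sketch (assumption; the coordinate file name and extent are
# hypothetical):
#
#   >>> rois = Roi_collection("coordinates.csv", 100, logger)
#   >>> all_stats = rois.compute_stats_all_bands(product, logger, stdout=True)
#
# where `product` is a Product instance as used in test_Product.py; each entry of
# `all_stats` is a scipy.stats.describe result (nobs, minmax, mean, variance, skewness,
# kurtosis) for one ROI and band, or None when the ROI holds no valid pixel.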
class Roi:
def __init__(self, id_utmx_utmy, extent, logger):
"""
Returns an ROI instance
:param id_utmx_utmy: a vector containing an id(int), utmx(float) and utmy(float).
:param extent: in meters
:param logger:
"""
self.id = str(int(id_utmx_utmy[0]))
self.utmx = id_utmx_utmy[1]
self.utmy = id_utmx_utmy[2]
self.extent = extent
# Compute ulx, uly, lrx, lry assuming UTM coordinates
self.ulx = self.utmx - self.extent / 2
self.uly = self.utmy + self.extent / 2
self.lrx = self.utmx + self.extent / 2
self.lry = self.utmy - self.extent / 2
logger.info('ROI id %s: ulx=%i, uly=%i, lrx=%i, lry=%i' % (self.id, self.ulx, self.uly, self.lrx, self.lry))
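# Worked example (illustrative, not part of the original module): with an extent of 20 m,
#   >>> roi = Roi([88, 660260, 4887620], 20, logger)
# yields ulx=660250, uly=4887630, lrx=660270, lry=4887610, i.e. a 20 m box centred on the
# given UTM coordinates, matching the subset used in test_Product.py.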
```
#### File: mtools/common/test_Product.py
```python
__author__ = "jerome.colin'at'<EMAIL>"
__license__ = "MIT"
__version__ = "1.0.3"
import os
import pytest
import Product
import Roi
import utilities
import numpy
import osgeo.gdal
from matplotlib import pylab as pl
TEST_DATA_PATH = os.environ['TEST_DATA_PATH']
logger = utilities.get_logger('test_Product', verbose=True)
# TEST REFACTORED PRODUCT FROM HERE
## TESTING PRODUCT_DIR (DEFAULT)
def test_product_dir():
logger.info("TESTING PRODUCT_DIR (DEFAULT)")
p_dir = Product.Product(TEST_DATA_PATH + "acix_carpentras/SENTINEL2A_20171007-103241-161_L2A_T31TFJ_C_V1-0",
logger)
assert type(p_dir.content_list) is list
p_dir_filename = p_dir.find_band("SRE_B4.")
assert p_dir_filename == TEST_DATA_PATH + "acix_carpentras/SENTINEL2A_20171007-103241-161_L2A_T31TFJ_C_V1-0/SENTINEL2A_20171007-103241-161_L2A_T31TFJ_C_V1-0_SRE_B4.tif"
p_dir_band = p_dir.get_band(p_dir.find_band("SRE_B4."))
assert type(p_dir_band) is numpy.ndarray
assert p_dir_band[0,0] == 850
assert p_dir_band[6, 2] == 1249
b4_subset = p_dir.get_band_subset(p_dir.find_band("SRE_B4."), ulx=649455, uly=4238445, lrx=649465, lry=4238435)
assert type(b4_subset) is numpy.ndarray
def test_product_mask_use_nan():
logger.info("TESTING PRODUCT GET_MASK WITH NAN")
p_dir = Product.Product(TEST_DATA_PATH + "acix_carpentras/SENTINEL2A_20171007-103241-161_L2A_T31TFJ_C_V1-0",
logger)
clm = numpy.zeros((3,3)) + 33
edg = numpy.zeros_like(clm) + 1
clm[1,:] = numpy.nan
clm[2, :] = numpy.nan
assert utilities.count_nan(clm) == 6
edg[:,1] = numpy.nan
assert utilities.count_nan(edg) == 3
mask, ratio = p_dir.get_mask(clm,edg, stats=True, use_nodata=True)
assert numpy.sum(mask) == 2
assert ratio == pytest.approx(2/9*100)
assert mask[1,1] == 1
assert mask[2,1] == 1
logger.debug("test_product_mask ratio=%6.4f" % ratio)
def test_product_mask_use_zeros():
logger.info("TESTING PRODUCT GET_MASK WITH ZEROS")
p_dir = Product.Product(TEST_DATA_PATH + "acix_carpentras/SENTINEL2A_20171007-103241-161_L2A_T31TFJ_C_V1-0",
logger)
clm = numpy.zeros((3,3)) + 33
edg = numpy.zeros_like(clm) + 1
clm[1,:] = 0
clm[2, :] = 0
assert numpy.count_nonzero(clm) == 3
edg[:,1] = 0
assert numpy.count_nonzero(edg) == 6
mask, ratio = p_dir.get_mask(clm,edg, stats=True, use_nodata=False)
assert numpy.sum(mask) == 2
assert ratio == pytest.approx(2/9*100)
assert mask[1,1] == 1
assert mask[2,1] == 1
logger.debug("test_product_mask ratio=%6.4f" % ratio)
## TEST PRODUCT_ZIP_VENUS
def test_product_zip_venus():
logger.info("TEST PRODUCT_ZIP_VENUS")
p_zip_venus = Product.Product_zip_venus(TEST_DATA_PATH + "VENUS-XS_20200402-191352-000_L2A_GALLOP30_D.zip", logger)
assert p_zip_venus.name == "VENUS-XS_20200402-191352-000_L2A_GALLOP30_D.zip"
assert p_zip_venus.band_names[0] == "SRE_B1."
assert p_zip_venus.clm_name == "CLM_XS"
assert p_zip_venus.edg_name == "EDG_XS"
logger.info("test_product_zip_venus_get_content_list")
b4_filename = p_zip_venus.find_band("SRE_B4.")
assert b4_filename == "VENUS-XS_20200402-191352-000_L2A_GALLOP30_C_V2-2/VENUS-XS_20200402-191352-000_L2A_GALLOP30_C_V2-2_SRE_B4.tif"
logger.info("test_product_zip_venus_get_band")
b4 = p_zip_venus.get_band(b4_filename)
assert type(b4) is numpy.ndarray
logger.debug("b4 is of size %i, with shape %s" % (numpy.size(b4), str(numpy.shape(b4))))
assert numpy.shape(b4)[0] == 11686
assert numpy.shape(b4)[1] == 11711
b4 = None
logger.info("test_product_zip_venus_get_band_aot")
atb_filename = p_zip_venus.find_band("ATB_XS")
assert atb_filename == "VENUS-XS_20200402-191352-000_L2A_GALLOP30_C_V2-2/VENUS-XS_20200402-191352-000_L2A_GALLOP30_C_V2-2_ATB_XS.tif"
atb_bands = p_zip_venus.get_band(atb_filename)
logger.debug("atb_bands is of type %s" % str(type(atb_bands)))
logger.debug("atb_bands is of size %i, with shape %s" % (numpy.size(atb_bands), str(numpy.shape(atb_bands))))
assert type(atb_bands) is numpy.ndarray
assert numpy.shape(atb_bands)[0] == 2
atb_bands = None
aot = p_zip_venus.get_band(atb_filename, layer=1)
logger.debug("aot is of size %i, with shape %s" % (numpy.size(aot), str(numpy.shape(aot))))
assert numpy.shape(aot)[0] == 11686
assert numpy.shape(aot)[1] == 11711
aot = None
aot = p_zip_venus.get_band(p_zip_venus.find_band(p_zip_venus.aot_name), scalef=p_zip_venus.aot_scalef, layer=p_zip_venus.aot_layer)
logger.debug("aot is of size %i, with shape %s" % (numpy.size(aot), str(numpy.shape(aot))))
assert numpy.shape(aot)[0] == 11686
assert numpy.shape(aot)[1] == 11711
aot = None
vap = p_zip_venus.get_band(p_zip_venus.find_band(p_zip_venus.vap_name), scalef=p_zip_venus.vap_scalef, layer=p_zip_venus.vap_layer)
logger.debug("vap is of size %i, with shape %s" % (numpy.size(vap), str(numpy.shape(vap))))
assert numpy.shape(vap)[0] == 11686
assert numpy.shape(vap)[1] == 11711
vap = None
logger.info("test_product_zip_venus_get_band_subset")
b4_subset = p_zip_venus.get_band_subset(b4_filename, ulx=649455, uly=4238445, lrx=649465, lry=4238435)
assert type(b4_subset) is numpy.ndarray
assert b4_subset[0, 0] == 93
assert b4_subset[1, 0] == 86
assert b4_subset[0, 1] == 113
assert b4_subset[1, 1] == 94
assert p_zip_venus.sre_scalef == 1000
b4_subset_SRE = p_zip_venus.get_band_subset(b4_filename, ulx=649455, uly=4238445, lrx=649465, lry=4238435, scalef=p_zip_venus.sre_scalef)
assert b4_subset_SRE[0, 0] == 0.093
assert b4_subset_SRE[1, 0] == 0.086
assert b4_subset_SRE[0, 1] == 0.113
assert b4_subset_SRE[1, 1] == 0.094
logger.info("test_product_zip_venus_get_band_subset_withROI")
roi = Roi.Roi([99, 649460, 4238440], 10, logger)
b4_subset_SRE = p_zip_venus.get_band_subset(b4_filename, roi=roi, scalef=p_zip_venus.sre_scalef)
assert b4_subset_SRE[0, 0] == 0.093
assert b4_subset_SRE[1, 0] == 0.086
assert b4_subset_SRE[0, 1] == 0.113
assert b4_subset_SRE[1, 1] == 0.094
aot = p_zip_venus.get_band_subset(p_zip_venus.find_band(p_zip_venus.aot_name), ulx=649455, uly=4238445, lrx=649465, lry=4238435, scalef=p_zip_venus.aot_scalef, layer=p_zip_venus.aot_layer)
logger.debug("aot is of size %i, with shape %s" % (numpy.size(aot), str(numpy.shape(aot))))
assert aot[0, 0] == 0.195
assert aot[1, 0] == 0.195
assert aot[0, 1] == 0.195
assert aot[1, 1] == 0.195
vap = p_zip_venus.get_band_subset(p_zip_venus.find_band(p_zip_venus.vap_name), ulx=649455, uly=4238445, lrx=649465, lry=4238435, scalef=p_zip_venus.vap_scalef, layer=p_zip_venus.vap_layer)
logger.debug("vap is of size %i, with shape %s" % (numpy.size(vap), str(numpy.shape(vap))))
assert vap[0, 0] == 0.55
assert vap[1, 0] == 0.55
assert vap[0, 1] == 0.55
assert vap[1, 1] == 0.55
## TEST PRODUCT_HDF
def test_product_hdf():
logger.info("TEST PRODUCT_HDF")
p_hdf_vermote = Product.Product_hdf(TEST_DATA_PATH + "vermote_carpentras/refsrs2-L1C_T31TFJ_A003037_20171005T104550-Carpentras.hdf", logger)
assert p_hdf_vermote.ptype == "HDF"
logger.info("test_product_hdf_content_list")
logger.debug(type(p_hdf_vermote.content_list))
logger.debug(p_hdf_vermote.content_list)
assert type(p_hdf_vermote.content_list) is list
logger.info("test_product_hdf_find_band")
subdsid = p_hdf_vermote.find_band("band04-red")
assert type(subdsid) is int
assert subdsid == 3
logger.info("test_product_hdf_get_band")
assert type(p_hdf_vermote.get_band(p_hdf_vermote.find_band("band04-red"))) is numpy.ndarray
assert p_hdf_vermote.get_band(p_hdf_vermote.find_band("band04-red"))[0][0] == 596
assert p_hdf_vermote.get_band(p_hdf_vermote.find_band("band04-red"))[6][2] == 1096
def test_product_hdf_acix():
p_hdf_acix = Product.Product_hdf_acix(TEST_DATA_PATH + "vermote_carpentras/refsrs2-L1C_T31TFJ_A012260_20171027T103128-Carpentras.hdf", logger)
assert p_hdf_acix.sre_scalef == 10000
b7 = p_hdf_acix.get_band(p_hdf_acix.find_band("band07"), scalef=p_hdf_acix.sre_scalef)
assert type(b7) is numpy.ndarray
assert b7[12,5] == 0.1829
assert b7[899,899] == 0.2021
## TESTING PRODUCT_DIR_MAJA
def test_product_dir_maja():
logger.info("TESTING PRODUCT_DIR_MAJA")
p_dir_maja = Product.Product_dir_maja(TEST_DATA_PATH + "acix_carpentras/SENTINEL2A_20171007-103241-161_L2A_T31TFJ_C_V1-0",
logger)
assert type(p_dir_maja.content_list) is list
p_dir_b4_filename = p_dir_maja.find_band("SRE_B4.")
assert p_dir_b4_filename == TEST_DATA_PATH + "acix_carpentras/SENTINEL2A_20171007-103241-161_L2A_T31TFJ_C_V1-0/SENTINEL2A_20171007-103241-161_L2A_T31TFJ_C_V1-0_SRE_B4.tif"
p_dir_band = p_dir_maja.get_band(p_dir_b4_filename, scalef=p_dir_maja.sre_scalef)
assert type(p_dir_band) is numpy.ndarray
assert p_dir_band[0,0] == 0.0850
assert p_dir_band[6, 2] == 0.1249
roi = Roi.Roi([88, 660260, 4887620], 20, logger)
b4_subset = p_dir_maja.get_band_subset(p_dir_maja.find_band("SRE_B4."), roi=roi, scalef=p_dir_maja.sre_scalef)
assert type(b4_subset) is numpy.ndarray
assert b4_subset[0, 0] == 0.1362
assert b4_subset[1, 0] == 0.1451
assert b4_subset[0, 1] == 0.1173
assert b4_subset[1, 1] == 0.1306
```
|
{
"source": "jeromecoutant/pyOCD",
"score": 2
}
|
#### File: pyocd/probe/common.py
```python
import logging
LOG = logging.getLogger(__name__)
## Whether the warning about no libusb was printed already.
#
# Used to prevent warning spewage if repeatedly scanning for probes, such as when ConnectHelper
# is used in blocking mode and no probes are connected.
did_show_no_libusb_warning = False
def show_no_libusb_warning():
global did_show_no_libusb_warning
if not did_show_no_libusb_warning:
LOG.warning("STLink and CMSIS-DAPv2 probes are not supported because no libusb library was found.")
did_show_no_libusb_warning = True
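# Illustrative behaviour (sketch, not part of the original module): repeated calls only
# log the warning once, because the module-level flag is flipped on the first call.
#   >>> show_no_libusb_warning()  # logs the warning
#   >>> show_no_libusb_warning()  # silent no-op from here on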
```
#### File: target/builtin/target_MAX32630.py
```python
from ...flash.flash import Flash
from ...core.coresight_target import CoreSightTarget
from ...core.memory_map import (FlashRegion, RamRegion, MemoryMap)
from ...debug.svd.loader import SVDFile
import logging
FLASH_ALGO = { 'load_address' : 0x20000000,
'instructions' : [
0xE00ABE00, 0x062D780D, 0x24084068, 0xD3000040, 0x1E644058, 0x1C49D1FA, 0x2A001E52, 0x4770D1F2,
0x4603B510, 0x4893460C, 0x68414448, 0xF0006888, 0xB1087080, 0xBD102001, 0x4448488E, 0x60486880,
0xE7F82000, 0x488B4602, 0x68414448, 0xF0206888, 0x60884070, 0x47702000, 0x44484886, 0x68886841,
0x7080F000, 0x2001B108, 0x6A484770, 0x2000B148, 0x6A486248, 0x2002B128, 0x6A486248, 0x2001B108,
0x6888E7F2, 0x4070F020, 0x5000F040, 0x20006088, 0xB510E7EA, 0x44484877, 0xF7FF6844, 0xB108FFDD,
0xBD102001, 0xF42068A0, 0xF440407F, 0x60A0402A, 0xF04068A0, 0x60A00002, 0x68A0BF00, 0x7080F000,
0xD1FA2800, 0xF02068A0, 0x60A04070, 0xF0006A60, 0xB1080002, 0xE7E42001, 0xE7E22000, 0x4605B570,
0x44484864, 0xF7FF6844, 0xB108FFB7, 0xBD702001, 0xF42068A0, 0xF440407F, 0x60A040AA, 0x68A06025,
0x0004F040, 0xBF0060A0, 0xF00068A0, 0x28007080, 0x68A0D1FA, 0x4070F020, 0x6A6060A0, 0x0002F000,
0x2001B108, 0x2000E7E3, 0xE92DE7E1, 0x460747F0, 0x4690468A, 0x4448484F, 0x46566844, 0xF0084645,
0xB1100003, 0xE8BD2001, 0x464587F0, 0xFF84F7FF, 0x2001B108, 0x68A0E7F7, 0x6000F020, 0x68A060A0,
0x0010F040, 0xE00E60A0, 0xCD016027, 0x68A06320, 0x0001F040, 0xBF0060A0, 0xF00068A0, 0x28007080,
0x1D3FD1FA, 0x2E041F36, 0xF007D303, 0x2800001F, 0x4838D1EA, 0x68C04448, 0xD1212880, 0xD31F2E10,
0xF02068A0, 0x60A00010, 0xF04068A0, 0x60A06000, 0x6027E014, 0x6320CD01, 0x6360CD01, 0x63A0CD01,
0x63E0CD01, 0xF04068A0, 0x60A00001, 0x68A0BF00, 0x7080F000, 0xD1FA2800, 0x3E103710, 0xD2E82E10,
0xD3192E04, 0xF02068A0, 0x60A06000, 0xF04068A0, 0x60A00010, 0x6027E00E, 0x6320CD01, 0xF04068A0,
0x60A00001, 0x68A0BF00, 0x7080F000, 0xD1FA2800, 0x1F361D3F, 0xD2EE2E04, 0x68A2B306, 0x6200F022,
0x68A260A2, 0x0210F042, 0xF04F60A2, 0x21FF30FF, 0x682AE005, 0x0201EA62, 0x02094010, 0x2E001E76,
0x6027D1F7, 0x68A26320, 0x0201F042, 0xBF0060A2, 0xF00268A2, 0x2A007280, 0xBF00D1FA, 0xF02068A0,
0x60A04070, 0xF0006A60, 0xB1080002, 0xE76A2001, 0xE7682000, 0x00000004, 0x00000000, 0x00000000,
# FLC_BASE, CLK_DIV, BRST_SIZE, FLASH_BASE, FLASH_SIZE, FLASH_SECTOR
0x40002000, 0x00000060, 0x00000020, 0x00000000, 0x00200000, 0x00002000
],
'pc_init' : 0x20000021,
'pc_eraseAll' : 0x20000093,
'pc_erase_sector' : 0x200000DD,
'pc_program_page' : 0x2000012B,
'begin_data' : 0x20004000, # Analyzer uses a max of 128B data (32 pages * 4 bytes / page)
'page_buffers' : [0x20006000, 0x20008000], # Enable double buffering
'begin_stack' : 0x20002000,
'static_base' : 0x20000278,
'min_program_length' : 4,
'analyzer_supported' : True,
'analyzer_address' : 0x2000A000 # Analyzer 0x2000A000..0x2000A600
}
class MAX32630(CoreSightTarget):
VENDOR = "Maxim"
memoryMap = MemoryMap(
FlashRegion( start=0, length=0x200000, blocksize=0x2000, is_boot_memory=True, algo=FLASH_ALGO),
RamRegion( start=0x20000000, length=0x40000),
)
def __init__(self, link):
super(MAX32630, self).__init__(link, self.memoryMap)
self._svd_location = SVDFile.from_builtin("max32630.svd")
```
#### File: target/builtin/target_MK64FN1M0xxx12.py
```python
from ..family.target_kinetis import Kinetis
from ..family.flash_kinetis import Flash_Kinetis
from ...core.memory_map import (FlashRegion, RamRegion, MemoryMap)
from ...debug.svd.loader import SVDFile
import logging
FLASH_ALGO = { 'load_address' : 0x20000000,
'instructions' : [
0xE00ABE00, 0x062D780D, 0x24084068, 0xD3000040, 0x1E644058, 0x1C49D1FA, 0x2A001E52, 0x4770D1F2,
0x4604b570, 0x4616460d, 0x5020f24c, 0x81c84932, 0x1028f64d, 0x460881c8, 0xf0208800, 0x80080001,
0x4448482e, 0xf8dcf000, 0x2001b108, 0x2000bd70, 0x4601e7fc, 0x47702000, 0x4929b510, 0x44484827,
0xf8b8f000, 0xb92c4604, 0x48242100, 0xf0004448, 0x4604f9a9, 0xf837f000, 0xbd104620, 0x4604b570,
0x4448481e, 0x46214b1e, 0xf00068c2, 0x4605f85d, 0x481ab93d, 0x23004448, 0x68c24621, 0xf946f000,
0xf0004605, 0x4628f820, 0xb5febd70, 0x460c4605, 0x46234616, 0x46294632, 0x44484810, 0xf8f8f000,
0xb9674607, 0x22012000, 0x2000e9cd, 0x46224633, 0x90024629, 0x44484809, 0xf984f000, 0xf0004607,
0x4638f802, 0x4807bdfe, 0xf4206840, 0xf5000070, 0x49040070, 0x47706048, 0x40052000, 0x00000004,
0x6b65666b, 0x4001f000, 0x4a0e2070, 0x20807010, 0xbf007010, 0x7800480b, 0x280009c0, 0x4809d0fa,
0xf0017801, 0xb1080020, 0x47702067, 0x0010f001, 0x2068b108, 0xf001e7f9, 0xb1080001, 0xe7f42069,
0xe7f22000, 0x40020000, 0x4df0e92d, 0x460d4604, 0x469a4690, 0xf0004650, 0x4606f891, 0x4630b116,
0x8df0e8bd, 0x46422310, 0x46204629, 0xf86cf000, 0xb10e4606, 0xe7f34630, 0x0008eb05, 0x68e01e47,
0xf1f0fbb7, 0x7011fb00, 0x68e0b140, 0xf0f0fbb7, 0x0b01f100, 0xfb0068e0, 0x1e47f00b, 0x480be011,
0x68004478, 0x20096005, 0x71c84909, 0xffacf7ff, 0x69a04606, 0x69a0b108, 0xb1064780, 0x68e0e003,
0x42bd4405, 0xbf00d9eb, 0xe7c94630, 0x000002ec, 0x40020000, 0x4604b570, 0x4628460d, 0xf84ef000,
0xb10e4606, 0xbd704630, 0x2004b90c, 0x2044e7fb, 0x71c84902, 0xff88f7ff, 0x0000e7f5, 0x40020000,
0xb9094601, 0x47702004, 0x6cc04826, 0x6003f3c0, 0x447b4b25, 0x0010f833, 0xb90a0302, 0xe7f22064,
0x60082000, 0x2002604a, 0x02c06088, 0x200060c8, 0x61486108, 0xbf006188, 0x4602e7e5, 0x2004b90a,
0x61914770, 0xe7fb2000, 0x4604b530, 0x2004b90c, 0x1e58bd30, 0xb9104008, 0x40101e58, 0x2065b108,
0x6820e7f6, 0xd8054288, 0x0500e9d4, 0x188d4428, 0xd20142a8, 0xe7eb2066, 0xe7e92000, 0x480b4601,
0xd0014281, 0x4770206b, 0xe7fc2000, 0xb90b4603, 0x47702004, 0xd801290f, 0xd0012a04, 0xe7f82004,
0xe7f62000, 0x40048000, 0x0000025a, 0x6b65666b, 0x41f0e92d, 0x46884607, 0x461d4614, 0x2004b914,
0x81f0e8bd, 0x462a2308, 0x46384641, 0xffbcf7ff, 0xb10e4606, 0xe7f34630, 0x4812e01f, 0x68004478,
0x8000f8c0, 0x490fcc01, 0x390c4479, 0x60486809, 0x490ccc01, 0x39184479, 0x60886809, 0x490a2007,
0xf7ff71c8, 0x4606ff01, 0xb10869b8, 0x478069b8, 0xe004b106, 0x0808f108, 0x2d003d08, 0xbf00d1dd,
0xe7cd4630, 0x000001b0, 0x40020000, 0x4dffe92d, 0x4682b082, 0x2310460c, 0x46504621, 0xf7ff9a04,
0x4683ff83, 0x0f00f1bb, 0x4658d003, 0xe8bdb006, 0xe9da8df0, 0xfbb00101, 0x4260f7f1, 0x40084279,
0x42a54245, 0x443dd100, 0xe0229e04, 0x0804eba5, 0xd90045b0, 0xea4f46b0, 0x90011018, 0x4478480f,
0x60046800, 0x490e2001, 0x980171c8, 0x72c80a00, 0x72889801, 0x72489805, 0xfeb6f7ff, 0xf1bb4683,
0xd0010f00, 0xe7d14658, 0x0608eba6, 0x443d4444, 0x2e00bf00, 0x2000d1da, 0x0000e7c8, 0x0000010e,
0x40020000, 0x4604b570, 0xb90c460d, 0xbd702004, 0x49032040, 0x460871c8, 0xf7ff7185, 0xe7f6fe95,
0x40020000, 0x4dffe92d, 0x4617460c, 0xe9dd461d, 0xf8ddb80c, 0xb91da038, 0xb0042004, 0x8df0e8bd,
0x463a2304, 0x98004621, 0xff1ef7ff, 0xb10e4606, 0xe7f24630, 0x4814e022, 0x68004478, 0x20026004,
0x71c84912, 0xf8804608, 0x490fb00b, 0x39144479, 0x68096828, 0xf7ff6088, 0x4606fe67, 0xf1b8b15e,
0xd0010f00, 0x4000f8c8, 0x0f00f1ba, 0x2000d002, 0x0000f8ca, 0x1f3fe004, 0x1d241d2d, 0xd1da2f00,
0x4630bf00, 0x0000e7c9, 0x00000074, 0x40020000, 0x00000000, 0x00080000, 0x00100000, 0x00200000,
0x00400000, 0x00800000, 0x01000000, 0x01000000, 0x40020004, 0x00000000,
],
'pc_init' : 0x20000021,
'pc_eraseAll' : 0x20000059,
'pc_erase_sector' : 0x2000007D,
'pc_program_page' : 0x200000AB,
'begin_stack' : 0x20001000,
'begin_data' : 0x20003000, # Analyzer uses a max of 1024 B data (256 pages * 4 bytes / page)
'page_buffers' : [0x20003000, 0x20004000], # Enable double buffering
'static_base' : 0x20000000 + 0x20 + 0x474,
'min_program_length' : 8,
'analyzer_supported' : True,
'analyzer_address' : 0x1ffff000 # Analyzer 0x1ffff000..0x1ffff600
}
class K64F(Kinetis):
memoryMap = MemoryMap(
FlashRegion( start=0, length=0x100000, blocksize=0x1000, is_boot_memory=True,
algo=FLASH_ALGO, flash_class=Flash_Kinetis),
RamRegion( start=0x1fff0000, length=0x40000)
)
def __init__(self, link):
super(K64F, self).__init__(link, self.memoryMap)
self._svd_location = SVDFile.from_builtin("MK64F12.svd")
```
|
{
"source": "JeromeCui/annoy",
"score": 3
}
|
#### File: annoy/test/hamming_index_test.py
```python
import numpy
import random
from common import TestCase
from annoy import AnnoyIndex
class HammingIndexTest(TestCase):
def test_basic_conversion(self):
f = 100
i = AnnoyIndex(f, 'hamming')
u = numpy.random.binomial(1, 0.5, f)
v = numpy.random.binomial(1, 0.5, f)
i.add_item(0, u)
i.add_item(1, v)
u2 = i.get_item_vector(0)
v2 = i.get_item_vector(1)
self.assertAlmostEqual(numpy.dot(u - u2, u - u2), 0.0)
self.assertAlmostEqual(numpy.dot(v - v2, v - v2), 0.0)
self.assertAlmostEqual(i.get_distance(0, 0), 0.0)
self.assertAlmostEqual(i.get_distance(1, 1), 0.0)
self.assertAlmostEqual(i.get_distance(0, 1), numpy.dot(u - v, u - v))
self.assertAlmostEqual(i.get_distance(1, 0), numpy.dot(u - v, u - v))
def test_basic_nns(self):
f = 100
i = AnnoyIndex(f, 'hamming')
u = numpy.random.binomial(1, 0.5, f)
v = numpy.random.binomial(1, 0.5, f)
i.add_item(0, u)
i.add_item(1, v)
i.build(10)
self.assertEquals(i.get_nns_by_item(0, 99), [0, 1])
self.assertEquals(i.get_nns_by_item(1, 99), [1, 0])
rs, ds = i.get_nns_by_item(0, 99, include_distances=True)
self.assertEquals(rs, [0, 1])
self.assertAlmostEqual(ds[0], 0)
self.assertAlmostEqual(ds[1], numpy.dot(u-v, u-v))
def test_save_load(self):
f = 100
i = AnnoyIndex(f, 'hamming')
u = numpy.random.binomial(1, 0.5, f)
v = numpy.random.binomial(1, 0.5, f)
i.add_item(0, u)
i.add_item(1, v)
i.build(10)
i.save('blah.ann')
j = AnnoyIndex(f, 'hamming')
j.load('blah.ann')
rs, ds = j.get_nns_by_item(0, 99, include_distances=True)
self.assertEquals(rs, [0, 1])
self.assertAlmostEqual(ds[0], 0)
self.assertAlmostEqual(ds[1], numpy.dot(u-v, u-v))
def test_many_vectors(self):
f = 10
i = AnnoyIndex(f, 'hamming')
for x in range(100000):
i.add_item(x, numpy.random.binomial(1, 0.5, f))
i.build(10)
rs, ds = i.get_nns_by_vector([0]*f, 10000, include_distances=True)
self.assertGreaterEqual(min(ds), 0)
self.assertLessEqual(max(ds), f)
dists = []
for x in range(1000):
rs, ds = i.get_nns_by_vector(numpy.random.binomial(1, 0.5, f), 1, search_k=1000, include_distances=True)
dists.append(ds[0])
avg_dist = 1.0 * sum(dists) / len(dists)
self.assertLessEqual(avg_dist, 0.42)
```
|
{
"source": "jeromecyang/ltsoj",
"score": 2
}
|
#### File: ltsoj/_utils/lib.py
```python
import sys, re, os, yaml
from collections import OrderedDict
path = os.path.dirname(os.path.realpath(__file__)) + '/../_episodes/'
def get_all_episodes():
return sorted(os.listdir(path))
def add_to_data(raw_content, data_to_add):
data_list = raw_content.split('---')
data_list[1] = data_list[1] + data_to_add
return "---".join(data_list)
def read_data(episode):
f = open(path + episode)
content = f.read()
data_list = content.split('---')
return yaml.load(data_list[1], Loader=yaml.SafeLoader)
def write_data(episode, data):
f = open(path + episode)
content = f.read()
data_list = content.split('---')
str_data = '\n' + yaml.dump(data, sort_keys=False, allow_unicode=True, width=1000)
new_content = "---".join([data_list[0], str_data, *data_list[2:]])
g = open(path + episode, 'w')
g.write(new_content)
g.close()
def split_and_get_nth(text, pattern, index):
parts = text.split(pattern)
if (len(parts) > index):
return parts[index]
return ''
def get_section(content, index):
sections = re.split(r'[^#]##\s.*?\n', split_and_get_nth(content, '---\n', 2), flags=re.S)
return sections[index]
def read_content(episode):
f = open(path + episode)
content = f.read()
return content
def write_content(episode, content):
g = open(path + episode, 'w')
g.write(content)
g.close()
```
|
{
"source": "jeromedockes/cogspaces",
"score": 3
}
|
#### File: cogspaces/modules/factored.py
```python
import math
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from cogspaces.datasets import fetch_atlas_modl
from cogspaces.modules.linear import DropoutLinear
class LatentClassifier(nn.Module):
def __init__(self, latent_size, target_size, var_penalty,
dropout=0., adaptive=False, batch_norm=True):
"""
One third-layer classification head.
Simply combines batch-norm -> DropoutLinear -> softmax.
Parameters
----------
latent_size : int
Size of the latent space.
target_size : int
Number of targets for the classifier.
var_penalty : float
Penalty to apply for variational latent_dropout
dropout : float, [0, 1]
Dropout rate to apply at the input
adaptive : bool
Use adaptive latent_dropout rate
batch_norm : bool
Use batch normalization at the input
"""
super().__init__()
if batch_norm:
self.batch_norm = nn.BatchNorm1d(latent_size, affine=False, )
self.linear = DropoutLinear(latent_size,
target_size, bias=True, p=dropout,
var_penalty=var_penalty,
adaptive=adaptive, level='layer')
def forward(self, input, logits=False):
if hasattr(self, 'batch_norm'):
if not self.training or len(input) > 1:
input = self.batch_norm(input)
logits_ = self.linear(input)
if logits:
return logits_
else:
return F.log_softmax(logits_, dim=1)
def reset_parameters(self):
self.linear.reset_parameters()
self.batch_norm.reset_parameters()
def penalty(self):
return self.linear.penalty()
def get_dropout(self):
return self.linear.get_dropout()
class VarMultiStudyModule(nn.Module):
def __init__(self, in_features,
latent_size,
target_sizes,
lengths,
input_dropout=0.,
regularization=1.,
latent_dropout=0.,
init='orthogonal',
batch_norm=True,
adaptive=False,
):
"""
Second and third-layer of the models.
Parameters
----------
in_features : int
Size of the input before the second layer
(number of resting-state loadings).
latent_size : int
Size of the latent dimension, in between the second and third layer.
target_sizes: Dict[str, int]
For each study, number of contrasts to predict
lengths: Dict[str, int]
Length of each study (for variational regularization)
input_dropout: float, [0, 1]
regularization: float, default=1
Regularization to apply for variational latent_dropout
latent_dropout: float, default=0
Dropout rate to apply in between the second and third layer.
init: str, {'normal', 'orthogonal', 'resting-state'}
How to initialize the second layer. If 'resting-state',
then it must be in_features = 453 and latent_size = 128
batch_norm: bool,
Batch norm between the second and third layer
adaptive: bool,
Dropout rate should be adaptive
"""
super().__init__()
total_length = sum(list(lengths.values()))
self.embedder = DropoutLinear(
in_features, latent_size, adaptive=adaptive,
var_penalty=regularization / total_length, level='layer',
p=input_dropout, bias=True)
self.classifiers = {study: LatentClassifier(
latent_size, target_size, dropout=latent_dropout,
var_penalty=regularization / lengths[study],
batch_norm=batch_norm, adaptive=adaptive, )
for study, target_size in target_sizes.items()}
for study, classifier in self.classifiers.items():
self.add_module('classifier_%s' % study, classifier)
self.init = init
def reset_parameters(self):
self.embedder.weight.data = self.get_embedder_init()
nn.init.zeros_(self.embedder.bias.data)
self.embedder.reset_dropout()
for classifier in self.classifiers.values():
classifier.reset_parameters()
def get_embedder_init(self):
weight = torch.empty_like(self.embedder.weight.data)
gain = 1. / math.sqrt(weight.shape[1])
if self.init == 'normal':
weight.uniform_(-gain, gain)
elif self.init == 'orthogonal':
nn.init.orthogonal_(weight, gain=gain)
elif self.init == 'resting-state':
dataset = fetch_atlas_modl()
weight = np.load(dataset['loadings_128_gm'])
weight = torch.from_numpy(np.array(weight))
return weight
def forward(self, inputs, logits=False):
preds = {}
for study, input in inputs.items():
preds[study] = self.classifiers[study](self.embedder(input),
logits=logits)
return preds
def penalty(self, studies):
"""
Return the variational penalty of the model.
Parameters
----------
studies: Iterable[str],
Studies to consider when computing the penalty
Returns
-------
penalty: torch.tensor,
Scalar penalty
"""
return (self.embedder.penalty()
+ sum(self.classifiers[study].penalty()
for study in studies))
def get_dropout(self):
return (self.embedder.get_dropout(),
{study: classifier.get_dropout() for study, classifier in
self.classifiers.items()})
```
#### File: cogspaces/modules/linear.py
```python
import math
import torch
from torch import nn
from torch.nn import Parameter, functional as F
k1 = 0.63576
k2 = 1.87320
k3 = 1.48695
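# These appear to be the constants from Molchanov et al. (2017), "Variational
# Dropout Sparsifies Deep Neural Networks", used in DropoutLinear.penalty below
# to approximate the negative KL term of the variational dropout objective.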
class DropoutLinear(nn.Linear):
def __init__(self, in_features, out_features, bias=True, p=1e-8,
level='layer', var_penalty=0., adaptive=False,
sparsify=False):
super().__init__(in_features, out_features, bias)
self.p = p
self.var_penalty = var_penalty
if level == 'layer':
self.log_alpha = Parameter(torch.Tensor(1, 1),
requires_grad=adaptive)
elif level == 'atom':
self.log_alpha = Parameter(torch.Tensor(1, in_features),
requires_grad=adaptive)
elif level == 'coef':
self.log_alpha = Parameter(torch.Tensor(out_features, in_features),
requires_grad=adaptive)
elif level == 'additive':
assert adaptive
self.log_sigma2 = Parameter(
torch.Tensor(out_features, in_features),
requires_grad=True)
else:
raise ValueError()
self.sparsify = sparsify
self.adaptive = adaptive
self.level = level
self.reset_dropout()
def reset_parameters(self):
super().reset_parameters()
if hasattr(self, 'level'):
self.reset_dropout()
def reset_dropout(self):
if self.p > 0:
log_alpha = math.log(self.p) - math.log(1 - self.p)
if self.level != 'additive':
self.log_alpha.data.fill_(log_alpha)
else:
self.log_sigma2.data = log_alpha + torch.log(
self.weight.data ** 2 + 1e-8)
def make_additive(self):
assert self.level != 'additive'
self.log_alpha.requires_grad = False
self.level = 'additive'
self.adaptive = True
out_features, in_features = self.weight.shape
self.log_sigma2 = Parameter(torch.Tensor(out_features, in_features),
requires_grad=True)
self.log_sigma2.data = (self.log_alpha.expand(*self.weight.shape)
+ torch.log(self.weight ** 2 + 1e-8)
).detach()
self.log_alpha.requires_grad = False
def make_non_adaptive(self):
assert self.level != 'additive'
self.adaptive = False
self.log_alpha.requires_grad = False
def make_adaptive(self):
assert self.level != 'additive'
self.adaptive = True
self.log_alpha.requires_grad = True
def get_var_weight(self):
if self.level == 'additive':
return torch.exp(self.log_sigma2)
# return self.sigma ** 2
else:
return torch.exp(self.log_alpha) * self.weight ** 2
def get_log_alpha(self):
if self.level == 'additive':
return torch.clamp(
self.log_sigma2 - torch.log(self.weight ** 2 + 1e-8), -8, 8)
else:
return torch.clamp(self.log_alpha, -8, 8)
def get_dropout(self):
return 1 / (1 + torch.exp(-self.get_log_alpha())).squeeze().detach()
def forward(self, input):
if self.training:
if self.p == 0:
return F.linear(input, self.weight, self.bias)
if self.adaptive:
output = F.linear(input, self.weight, self.bias)
# Local reparametrization trick: gaussian latent_dropout noise on input
# <-> gaussian noise on output
std = torch.sqrt(
F.linear(input ** 2, self.get_var_weight(), None) + 1e-8)
eps = torch.randn_like(output, requires_grad=False)
return output + std * eps
else:
eps = torch.randn_like(input, requires_grad=False)
input = input * (
1 + torch.exp(.5 * self.get_log_alpha()) * eps)
return F.linear(input, self.weight, self.bias)
else:
if self.sparsify:
weight = self.sparse_weight
else:
weight = self.weight
return F.linear(input, weight, self.bias)
def penalty(self):
if not self.adaptive or self.var_penalty == 0:
return torch.tensor(0., device=self.weight.device,
dtype=torch.float)
else:
log_alpha = self.get_log_alpha()
var_penalty = - k1 * (torch.sigmoid(k2 + k3 * log_alpha)
- .5 * F.softplus(-log_alpha)
- 1).expand(*self.weight.shape).sum()
return var_penalty * self.var_penalty
@property
def density(self):
return (self.sparse_weight != 0).float().mean().item()
@property
def sparse_weight(self):
mask = self.get_log_alpha() > 3
return self.weight.masked_fill(mask, 0)
```
#### File: cogspaces/modules/loss.py
```python
from typing import Dict, Tuple
import torch
from torch import nn
from torch.nn import functional as F
class MultiStudyLoss(nn.Module):
def __init__(self, study_weights: Dict[str, float],
) -> None:
super().__init__()
self.study_weights = study_weights
def forward(self, preds: Dict[str, torch.FloatTensor],
targets: Dict[str, torch.LongTensor]) \
-> Tuple[torch.FloatTensor, torch.FloatTensor]:
loss = 0
for study in preds:
pred = preds[study]
target = targets[study]
this_loss = F.nll_loss(pred, target, reduction='elementwise_mean')
loss += this_loss * self.study_weights[study]
return loss
```
#### File: cogspaces/plotting/volume.py
```python
import os
from itertools import repeat
from os.path import join
import numpy as np
from cogspaces.datasets import fetch_mask
from joblib import Parallel, delayed
from matplotlib.colors import LinearSegmentedColormap, rgb_to_hsv, hsv_to_rgb
from nilearn._utils import check_niimg
from nilearn.datasets import fetch_surf_fsaverage5
from nilearn.image import iter_img
from nilearn.input_data import NiftiMasker
def make_cmap(color, rotation=.5, white=False, transparent_zero=False):
h, s, v = rgb_to_hsv(color)
h = h + rotation
if h > 1:
h -= 1
r, g, b = color
ri, gi, bi = hsv_to_rgb((h, s, v))
colors = {'direct': (ri, gi, bi), 'inverted': (r, g, b)}
cdict = {}
for direction, (r, g, b) in colors.items():
if white:
cdict[direction] = {color: [(0.0, 0.0416, 0.0416),
(0.18, c, c),
(0.5, 1, 1),
(0.62, 0.0, 0.0),
(1.0, 0.0416, 0.0416)] for color, c in
[('blue', b), ('red', r), ('green', g)]}
else:
cdict[direction] = {color: [(0.0, 1, 1),
(0.32, c, c),
(0.5, 0.0416, 0.0416),
(0.5, 0.0, 0.0),
(0.87, 0.0, 0.0),
(1.0, 1, 1)] for color, c in
[('blue', b), ('red', r), ('green', g)]}
if transparent_zero:
cdict[direction]['alpha'] = [(0, 1, 1), (0.5, 0, 0), (1, 1, 1)]
cmap = LinearSegmentedColormap('cmap', cdict['direct'])
cmapi = LinearSegmentedColormap('cmap', cdict['inverted'])
cmap._init()
cmapi._init()
cmap._lut = np.maximum(cmap._lut, cmapi._lut[::-1])
# Big hack from nilearn (WTF !?)
cmap._lut[-1, -1] = 0
return cmap
def plot_single(img, name, output_dir, view_types=['stat_map'], color=None,
threshold=0):
from nilearn.plotting import plot_stat_map, find_xyz_cut_coords, plot_glass_brain
if color is not None:
cmap = make_cmap(color, rotation=.5)
cmap_white = make_cmap(color, rotation=.5, white=True)
else:
cmap = 'cold_hot'
cmap_white = 'cold_white_hot'
srcs = []
vmax = np.abs(img.get_data()).max()
threshold = vmax / 8
for view_type in view_types:
src = join(output_dir, '%s_%s.png' % (name, view_type))
cut_coords = find_xyz_cut_coords(img, activation_threshold=vmax / 3)
if view_type == 'stat_map':
plot_stat_map(img, threshold=threshold,
cut_coords=cut_coords,
vmax=vmax,
colorbar=True,
output_file=src,
# cmap=cmap
)
elif view_type == 'glass_brain':
plot_glass_brain(img, threshold=threshold,
vmax=vmax,
plot_abs=False,
output_file=src,
colorbar=True,
# cmap=cmap_white
)
else:
raise ValueError('Wrong view type in `view_types`: got %s' %
view_type)
srcs.append(src)
return srcs, name
def numbered_names(name):
i = 0
while True:
yield '%s_%i' % (name, i)
i += 1
def plot_4d_image(img, names=None, output_dir=None,
colors=None,
view_types=['stat_map'],
threshold=True,
n_jobs=1, verbose=10):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if colors is None:
colors = repeat(None)
if 'surf_stat_map_right' in view_types or 'surf_stat_map_left' in view_types:
fetch_surf_fsaverage5()
filename = img
img = check_niimg(img, ensure_ndim=4)
img.get_data()
if names is None or isinstance(names, str):
if names is None:
dirname, filename = os.path.split(filename)
names = filename.replace('.nii.gz', '')
names = numbered_names(names)
else:
assert len(names) == img.get_shape()[3]
mask = fetch_mask()
masker = NiftiMasker(mask_img=mask).fit()
components = masker.transform(img)
n_components = len(components)
threshold = np.percentile(np.abs(components),
100. * (1 - 1. / n_components)) if threshold else 0
imgs = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(plot_single)(img, name, output_dir, view_types, color,
threshold=threshold)
for name, img, color in zip(names, iter_img(img), colors))
return imgs
```
|
{
"source": "jeromedockes/dataset_shift_biomarkers",
"score": 3
}
|
#### File: simulations/datashift/datasets.py
```python
import numpy as np
from scipy.stats import multivariate_normal, norm
import pandas as pd
from sklearn.utils import check_random_state
def quadratic_additive_noise(
x_mean=0, x_std=1.0, n_points=100, noise_level=1.0, random_state=0
):
"""y = x**2 + e"""
rng = check_random_state(random_state)
x = rng.normal(x_mean, x_std, size=n_points)
noise = rng.normal(size=n_points) * noise_level
y = x ** 2 + noise
return x, y
def blobs(mean=[0], cov=[1.0], size=100, random_state=0):
X = np.empty((len(mean) * size, len(mean[0])), dtype=float)
y = np.empty(len(mean) * size)
for i, (m, c) in enumerate(zip(mean, cov)):
ii = i * size
distrib = multivariate_normal(mean=m, cov=c)
X[ii : ii + size] = distrib.rvs(size=size, random_state=random_state)
y[ii : ii + size] = i
return X, y
def parabola(
rot=0, offset=0, c=0.2, noise=1.0, size=200, x=None, mode="laplace", loc=0
):
rng = np.random.RandomState(0)
if x is None:
if mode == "uniform":
xmin, xmax = -5, 5
x = rng.uniform(xmin, xmax, size)
elif mode == "laplace":
x = rng.laplace(loc=loc, size=size)
else:
raise ValueError("mode not understood: {}".format(mode))
y = c * x ** 2 + offset + noise * rng.normal(size=size)
A = np.asarray([[np.cos(rot), np.sin(rot)], [-np.sin(rot), np.cos(rot)]])
return A.dot([x, y]).T
def parabola_multiclass(size=100, **kwargs):
Xt = parabola(np.pi / 4, size=size, **kwargs)
yt = np.zeros(len(Xt), dtype=int)
Xf = parabola(np.pi / 4, 3.0, size=size, **kwargs)
yf = np.ones(len(Xf), dtype=int)
X = np.concatenate((Xt, Xf), axis=0)
y = np.concatenate((yt, yf), axis=0)
return X, y
class BlobsGenerator:
def __init__(
self, random_state=0, class_1_mean=(7., 0.), z_direction=(1, 1),
z_noise=.3
):
self.rng = check_random_state(random_state)
self.class_0_mean = (0, 0)
self.class_1_mean = class_1_mean
self.z_direction = np.asarray(
(np.cos(z_direction), np.sin(z_direction)))
self.z_noise = z_noise
def sample(self, size=200):
y = self.rng.binomial(1, 0.5, size=size)
n0 = (y == 0).sum()
n1 = (y == 1).sum()
X = np.empty((size, 2))
X[y == 0] = self.rng.multivariate_normal(
self.class_0_mean, np.eye(2), size=n0)
X[y == 1] = self.rng.multivariate_normal(
self.class_1_mean, np.eye(2), size=n1)
z = self.z_direction.dot(X.T) + self.rng.normal(0., self.z_noise, size)
return pd.DataFrame({"x1": X[:, 0], "x2": X[:, 1], "z": z, "y": y})
class ParabolasGenerator:
def __init__(
self,
z_loc=0,
z_scale=1.0,
c=0.3,
rot=np.pi / 4,
class_offset=3.0,
x1_noise=0.7,
x2_noise=1.0,
y_noise=0.2,
random_state=0,
):
self.rng = check_random_state(random_state)
self.z_loc = z_loc
self.z_scale = z_scale
self.c = c
self.rot = rot
self.class_offset = class_offset
self.x1_noise = x1_noise
self.x2_noise = x2_noise
self.y_noise = y_noise
self.A = np.asarray(
[
[np.cos(self.rot), np.sin(self.rot)],
[-np.sin(self.rot), np.cos(self.rot)],
]
)
def sample(self, size=200):
z = self.rng.normal(self.z_loc, self.z_scale, size=size)
y = (z > 0).astype(int)
y_noise = self.rng.binomial(1, self.y_noise, size=size)
y = y * (1 - y_noise) + (1 - y) * y_noise
x1 = z + self.rng.normal(0.0, self.x1_noise, size=size)
offset = self.class_offset * y
x2 = (
self.c * x1 ** 2
+ offset
+ self.x2_noise * self.rng.normal(size=size)
)
x1, x2 = self.A.dot([x1, x2])
return pd.DataFrame({"x1": x1, "x2": x2, "z": z, "y": y})
def score(self, z, normalize=True):
distrib = norm(self.z_loc, self.z_scale)
scores = distrib.pdf(z)
if normalize:
scores /= scores.sum()
return scores
```
|
{
"source": "jeromedockes/neuroquery_image_search",
"score": 2
}
|
#### File: neuroquery_image_search/tests/conftest.py
```python
from pathlib import Path
import tempfile
from unittest.mock import MagicMock
import pytest
import numpy as np
import pandas as pd
from scipy import sparse
import nibabel
import nilearn
from nilearn.datasets import _testing
from nilearn.datasets._testing import request_mocker # noqa: F401
def make_fake_img():
rng = np.random.default_rng(0)
img = rng.random(size=(4, 3, 5))
return nibabel.Nifti1Image(img, np.eye(4))
@pytest.fixture()
def fake_img():
return make_fake_img()
def make_fake_data():
n_voxels, n_components, n_studies, n_terms = 23, 8, 12, 9
rng = np.random.default_rng(0)
difumo_maps = rng.random((n_components, n_voxels))
difumo_maps[rng.binomial(1, 0.3, size=difumo_maps.shape).astype(bool)] = 0
difumo_inverse_covariance = np.linalg.pinv(difumo_maps.dot(difumo_maps.T))
difumo_maps = sparse.csr_matrix(difumo_maps)
projections = rng.random((n_studies, n_components))
term_projections = rng.random((n_terms, n_components))
articles_info = pd.DataFrame({"pmid": np.arange(n_studies) + 100})
articles_info["title"] = [
f"title {pmid}" for pmid in articles_info["pmid"]
]
articles_info["pubmed_url"] = [
f"url {pmid}" for pmid in articles_info["pmid"]
]
mask = np.zeros(4 * 3 * 5, dtype=int)
mask[:n_voxels] = 1
mask = mask.reshape((4, 3, 5))
mask_img = nibabel.Nifti1Image(mask, np.eye(4))
doc_freq = pd.DataFrame(
{
"term": ["term_{i}" for i in range(n_terms)],
"document_frequency": np.arange(n_terms),
}
)
with tempfile.TemporaryDirectory() as temp_dir:
temp_dir = Path(temp_dir)
sparse.save_npz(temp_dir / "difumo_maps.npz", difumo_maps)
np.save(
temp_dir / "difumo_inverse_covariance.npy",
difumo_inverse_covariance,
)
np.save(temp_dir / "projections.npy", projections)
np.save(temp_dir / "term_projections.npy", term_projections)
articles_info.to_csv(temp_dir / "articles-info.csv", index=False)
mask_img.to_filename(str(temp_dir / "mask.nii.gz"))
doc_freq.to_csv(
str(temp_dir / "document_frequencies.csv"), index=False
)
archive = _testing.dict_to_archive(
{"neuroquery_image_search_data": temp_dir}
)
return archive
@pytest.fixture(autouse=True)
def temp_data_dir(tmp_path_factory, monkeypatch):
home_dir = tmp_path_factory.mktemp("temp_home")
monkeypatch.setenv("HOME", str(home_dir))
monkeypatch.setenv("USERPROFILE", str(home_dir))
data_dir = home_dir / "neuroquery_data"
data_dir.mkdir()
monkeypatch.setenv("NEUROQUERY_DATA", str(data_dir))
@pytest.fixture(autouse=True, scope="function")
def map_mock_requests(request_mocker):
request_mocker.url_mapping[
"https://osf.io/mx3t4/download"
] = make_fake_data()
return request_mocker
@pytest.fixture(autouse=True)
def patch_nilearn(monkeypatch):
def fake_motor_task(*args, **kwargs):
return {"images": [make_fake_img()]}
monkeypatch.setattr(
nilearn.datasets, "fetch_neurovault_motor_task", fake_motor_task
)
monkeypatch.setattr("webbrowser.open", MagicMock())
```
#### File: neuroquery_image_search/tests/test_searching.py
```python
import numpy as np
import pandas as pd
from nilearn import image
import json
import pytest
from neuroquery_image_search import _searching, _datasets
def test_image_search(tmp_path, fake_img):
img_path = str(tmp_path / "img.nii.gz")
fake_img.to_filename(img_path)
results_path = tmp_path / "results.json"
_searching.image_search(
f"{img_path} -o {results_path} --n_studies 7 --n_terms 3".split()
)
results = json.loads(results_path.read_text())
study_results = pd.DataFrame(results["studies"])
assert study_results.shape == (7, 4)
assert np.allclose(study_results.reset_index().at[0, "similarity"], 1.0)
results_path = tmp_path / "results.html"
_searching.image_search(
[img_path, "-o", str(results_path), "--n_studies", "1"]
)
results = results_path.read_text()
assert results.strip().startswith("<!DOCTYPE html>")
_searching.image_search(["-o", str(results_path), "--n_studies", "7"])
results = results_path.read_text()
assert "Image" in results
_searching.image_search([])
def test_json_encoder():
df = pd.DataFrame({"A": [2, 3]}, index=list("ab"))
data = {"a": {"B": 3.3}, "b": df}
as_json = json.dumps(data, cls=_searching._JSONEncoder)
loaded = json.loads(as_json)
loaded_df = pd.DataFrame(loaded["b"])
assert (df == loaded_df).all().all()
with pytest.raises(TypeError):
json.dumps({"a": json}, cls=_searching._JSONEncoder)
def test_neuroquery_image_search(fake_img):
search = _searching.NeuroQueryImageSearch()
results = search(fake_img, 20, transform="identity", rescale_similarities=False)
assert results["studies"]["similarity"].max() != pytest.approx(1.)
results = search(fake_img, 20, transform="identity")
assert results["terms"]["similarity"].min() == pytest.approx(0.)
assert results["studies"]["similarity"].max() == pytest.approx(1.)
results = results["studies"]
neg_img = image.new_img_like(fake_img, image.get_data(fake_img) * -1.0)
neg_results = search(neg_img, 20, transform="identity")["studies"]
assert (neg_results["pmid"].values == results["pmid"].values[::-1]).all()
results = search(fake_img, 20, transform="absolute_value")["studies"]
neg_results = search(neg_img, 20, transform="absolute_value")["studies"]
assert (neg_results["pmid"].values == results["pmid"]).all()
pos_img = image.new_img_like(
fake_img, np.maximum(0, image.get_data(fake_img))
)
results = search(fake_img, 20, transform="positive_part")["studies"]
pos_results = search(pos_img, 20, transform="identity")["studies"]
assert (pos_results["pmid"].values == results["pmid"]).all()
data = _datasets.fetch_data()
assert (search.data["studies_info"] == data["studies_info"]).all().all()
assert (
(search.data["document_frequencies"] == data["document_frequencies"])
.all()
.all()
)
```
|
{
"source": "jeromedockes/pylabelbuddy",
"score": 2
}
|
#### File: pylabelbuddy/tests/test_annotations_manager.py
```python
from pylabelbuddy import _annotations_manager, _database
def fill_db(data_dir):
_database.add_docs_from_file(data_dir / "docs_1.csv")
_database.add_labels_from_json(data_dir / "labels_1.json")
def test_annotations_manager(root, data_dir, prepare_db):
manager = _annotations_manager.AnnotationsManager(root)
assert manager.n_docs() == 30
new_db = data_dir / "new_db.sqlite"
_database.set_db_path(new_db)
con = _database.get_connection()
manager.change_database()
assert manager.n_docs() == 0
manager.visit_document(1)
assert manager.current_doc_id is None
for direction in ["prev", "next"]:
for kind in ["", "_labelled", "_unlabelled"]:
getattr(manager, f"visit_{direction}{kind}")()
assert manager.current_doc_id is None
fill_db(data_dir)
labels = manager.labels_info
assert len(labels) == 8
assert labels["something"] == {"id": 1, "color": "#ff00ff"}
manager.visit_document()
assert manager.current_doc_id == 1
manager.visit_document(2)
assert manager.current_doc_id == 2
assert (
manager.content
== con.execute("select content from document where id = 2").fetchone()[
"content"
]
)
manager.visit_document(2000)
assert manager.current_doc_id == 2
with con:
con.execute("update app_state set last_visited_doc = null")
manager.visit_document(2000)
assert manager.current_doc_id == 1
assert (
con.execute("select last_visited_doc from app_state").fetchone()[0]
== 1
)
with con:
con.executemany(
"insert into annotation (doc_id, label_id, start_char, end_char)"
" values (?, ?, ?, ?)",
[(7, 1, 2, 5), (10, 3, 5, 9)],
)
manager.visit_next()
assert manager.current_doc_id == 2
manager.visit_next_labelled()
assert manager.current_doc_id == 7
manager.visit_next_labelled()
assert manager.current_doc_id == 10
manager.visit_prev_unlabelled()
assert manager.current_doc_id == 9
manager.visit_prev_labelled()
assert manager.current_doc_id == 7
manager.visit_prev_labelled()
assert manager.current_doc_id == 7
manager.visit_prev_unlabelled()
assert manager.current_doc_id == 6
manager.visit_prev()
assert manager.current_doc_id == 5
manager.visit_document(6)
manager.visit_next_unlabelled()
assert manager.current_doc_id == 8
with con:
con.execute("delete from document where id = 8")
manager.refresh()
assert manager.current_doc_id == 1
manager.visit_document(1)
manager.visit_prev()
assert manager.current_doc_id == 1
manager.visit_prev_labelled()
assert manager.current_doc_id == 1
manager.visit_prev_unlabelled()
assert manager.current_doc_id == 1
manager.visit_document(30)
manager.visit_next()
assert manager.current_doc_id == 30
manager.visit_next_labelled()
assert manager.current_doc_id == 30
manager.visit_next_unlabelled()
assert manager.current_doc_id == 30
assert len(list(manager.existing_regions())) == 0
manager.visit_document(7)
assert len(list(manager.existing_regions())) == 1
assert manager.add_annotation("something-else", 0, 1) == 3
assert manager.add_annotation("unknown label", 0, 1) is None
assert len(list(manager.existing_regions())) == 2
manager.delete_annotation("1")
assert len(list(manager.existing_regions())) == 1
manager.delete_annotation("3")
assert len(list(manager.existing_regions())) == 0
manager.visit_prev()
manager.visit_next_labelled()
assert manager.current_doc_id == 10
manager.update_annotation_label("2", "unknown label")
manager.update_annotation_label("2", "something-else")
assert (
con.execute(
"select label_id from annotation where rowid = 2"
).fetchone()[0]
== 2
)
assert manager.last_doc() == 30
assert manager.first_doc() == 1
assert manager.first_unlabelled() == 1
assert manager.last_unlabelled() == 30
assert manager.first_labelled() == 10
assert manager.last_labelled() == 10
```
#### File: pylabelbuddy/tests/test_annotations_navigator.py
```python
from unittest.mock import MagicMock
from pylabelbuddy import _annotations_navigator
def test_annotations_navigator(root):
annotations_manager = MagicMock()
annotations_manager.first_doc.return_value = 2
annotations_manager.last_doc.return_value = 123
annotations_manager.first_labelled.return_value = 7
annotations_manager.last_labelled.return_value = 123
annotations_manager.first_unlabelled.return_value = 2
annotations_manager.last_unlabelled.return_value = 32
annotations_manager.current_doc_id = 9
navigator = _annotations_navigator.AnnotationsNavigator(
root, annotations_manager
)
for button_name in ["", "labelled_", "unlabelled_"]:
for direction in ["prev", "next"]:
assert (
getattr(navigator.nav_bar, f"{direction}_{button_name}button")[
"state"
]
== "normal"
)
annotations_manager.current_doc_id = 5
navigator.refresh()
assert navigator.nav_bar.prev_labelled_button["state"] == "disabled"
```
#### File: pylabelbuddy/tests/test_annotations_notebook.py
```python
from pylabelbuddy import _annotations_notebook
def test_annotations_notebook(root, annotations_mock, dataset_mock):
nb = _annotations_notebook.AnnotationsNotebook(
root, annotations_mock, dataset_mock
)
nb.change_database()
assert nb.notebook.index(nb.notebook.select()) == 2
nb.go_to_annotations()
assert nb.notebook.index(nb.notebook.select()) == 0
```
#### File: pylabelbuddy/tests/test_dataset_menu.py
```python
from unittest.mock import MagicMock
from pylabelbuddy import _dataset_menu
class _DB:
def __init__(self):
self.lab_ids = list(range(7, 40, 4))
self.unlab_ids = list(range(9, 59, 4))
self.all_ids = sorted(self.lab_ids + self.unlab_ids)
self.doc_ids = {
"all docs": self.all_ids,
"unlabelled docs": self.unlab_ids,
"labelled docs": self.lab_ids,
}
def get_docs(self, offset, page_size, doc_filter):
docs_ids = self.doc_ids[doc_filter][offset : offset + page_size]
return [
{"id": doc_id, "trunc_content": f"doc {doc_id} {doc_filter}"}
for doc_id in docs_ids
]
def n_docs(self, doc_filter):
return len(self.get_docs(0, 1000, doc_filter))
def test_documents_list(root, example_labels):
db = _DB()
assert db.n_docs("all docs") == 22
assert db.n_docs("labelled docs") == 9
assert db.n_docs("unlabelled docs") == 13
manager = MagicMock()
manager.get_labels.return_value = [
{"id": i + 1, "string_form": la["text"], "color": "#aabbcc"}
for i, la in enumerate(example_labels)
]
manager.total_n_docs.side_effect = db.n_docs
manager.get_docs.side_effect = db.get_docs
menu = _dataset_menu.DatasetMenu(root, manager)
doc_list = menu.documents_list
doc_list.page_size = 4
doc_list.fill()
assert doc_list.docs_info == db.get_docs(0, 4, "all docs")
doc_list.next_page()
assert doc_list.docs_info == db.get_docs(4, 4, "all docs")
doc_list.next_page()
assert doc_list.offset == 8
doc_list.doc_filter.set("labelled docs")
doc_list._filter_change()
assert doc_list.offset == 0
doc_list.last_page()
assert doc_list.offset == 8
doc_list.next_page()
doc_list.next_page()
assert doc_list.offset == 8
assert len(doc_list.docs_info) == 1
assert doc_list.docs_info == db.get_docs(8, 4, "labelled docs")
doc_list.docs_list.listbox.selection_set(0)
doc_list.go_to_annotations()
assert doc_list.requested_doc_id == 39
doc_list.delete_selection()
assert manager.delete_docs.call_args[0] == ([39],)
doc_list.prev_page()
assert doc_list.offset == 4
doc_list.first_page()
assert doc_list.offset == 0
manager.total_n_docs.side_effect = lambda *args: 0
doc_list.fill()
assert hasattr(doc_list, "empty_banner")
def test_labels_list(root, example_labels, monkeypatch):
color_chooser = MagicMock()
color_chooser.return_value = ((1, 2, 3), "#010203")
monkeypatch.setattr("tkinter.colorchooser.askcolor", color_chooser)
db = _DB()
manager = MagicMock()
labels = [
{"id": i + 1, "string_form": la["text"], "color": "#aabbcc"}
for i, la in enumerate(example_labels)
]
manager.get_labels.return_value = labels
manager.total_n_docs.side_effect = db.n_docs
manager.get_docs.side_effect = db.get_docs
menu = _dataset_menu.DatasetMenu(root, manager)
lab_list = menu.labels_list
assert lab_list.labels_info == labels
lab_list.labels_list.listbox.selection_set([1])
lab_list.labels_list.listbox.selection_set([3])
lab_list._update_button_states()
assert lab_list.delete_button["state"] == "normal"
lab_list._set_color_for_selection()
assert manager.set_label_color.call_args[0] == (
labels[1]["id"],
"#010203",
)
lab_list.delete_selection()
assert manager.delete_labels.call_args[0] == (
[labels[1]["id"], labels[3]["id"]],
)
manager.get_labels.side_effect = lambda *args: []
lab_list.fill()
assert hasattr(lab_list, "empty_banner")
```
#### File: pylabelbuddy/tests/test_searchable_text.py
```python
from pylabelbuddy import _searchable_text
def test_searchable_text(example_text):
root = None
text = example_text
searchable_text = _searchable_text.SearchableText(root, None)
searchable_text._fill(text)
assert searchable_text.text["state"] == "disabled"
searchable_text.search_box.search_string.set("maçã")
selected = searchable_text.text.tag_ranges("sel")
start = searchable_text.text.count("1.0", selected[0])[0]
end = searchable_text.text.count("1.0", selected[1])[0]
assert text[start : end + 1] == "maçã1"
searchable_text._search_next()
selected = searchable_text.text.tag_ranges("sel")
start = searchable_text.text.count("1.0", selected[0])[0]
end = searchable_text.text.count("1.0", selected[1])[0]
assert text[start : end + 1] == "maçã2"
searchable_text._search_prev()
selected = searchable_text.text.tag_ranges("sel")
start = searchable_text.text.count("1.0", selected[0])[0]
end = searchable_text.text.count("1.0", selected[1])[0]
assert text[start : end + 1] == "maçã1"
searchable_text._search_prev()
selected = searchable_text.text.tag_ranges("sel")
start = searchable_text.text.count("1.0", selected[0])[0]
end = searchable_text.text.count("1.0", selected[1])[0]
assert text[start : end + 1] == "maçã3"
```
|
{
"source": "jeromedontdev/tinkup",
"score": 2
}
|
#### File: jeromedontdev/tinkup/tinkup.py
```python
from cgitb import text
import queue
from random import seed
import serial
import serial.tools.list_ports
from signal import signal, SIGINT
import sys
import threading
import time
import tkinter
from tkinter import END, W, PhotoImage, filedialog as fd, scrolledtext as sd
global fw_filename
fw_filename = ""
COM_OVERRIDE=None
VERSION='1.0'
DEBUG=False
running = True
class PrintLogger():
def __init__(self, textbox):
self.textbox = textbox
def write(self, text):
self.textbox.insert(tkinter.END, text)
self.textbox.see(END)
def flush(self):
pass
def on_closing():
global running
running = False
def sig_handler(signal_received, frame):
on_closing()
class Tink:
cmd = {
'CmdGetVer': b'\x01',
'CmdErase': b'\x02',
'CmdWrite': b'\x03',
'JumpApp': b'\x05',
}
ctrl = {
'SOH': b'\x01',
'EOT': b'\x04',
'DLE': b'\x10',
}
rxfsm = {
'RxIdle': 0,
'RxBuffer': 1,
'RxEscape': 2,
}
blfsm = {
'BlIdle': 0,
'BlVersion': 1,
'BlErase': 2,
'BlWrite': 3,
'BlJump': 4,
}
serial = None
rx_state = rxfsm['RxIdle']
def timer(self, timestamp):
# 100ms interval timer
if running:
timestamp += 0.1
self.timer_thread = threading.Timer(timestamp - time.time(), self.timer, args=(timestamp,)).start()
def calc_crc(self, b):
# NOTE: This is the CRC lookup table for polynomial 0x1021
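# The nibble-at-a-time table lookup below computes a CRC-16 with
# polynomial 0x1021, initial value 0 and no reflection (XMODEM-style);
# for example, calc_crc(b'\x01') evaluates to 0x1021.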
lut = [
0, 4129, 8258, 12387,\
16516, 20645, 24774, 28903,\
33032, 37161, 41290, 45419,\
49548, 53677, 57806, 61935]
num1 = 0
for num2 in b:
num3 = (num1 >> 12) ^ (num2 >> 4)
num4 = (lut[num3 & 0x0F] ^ (num1 << 4)) & 0xFFFF
num5 = (num4 >> 12) ^ num2
num1 = (lut[num5 & 0x0F] ^ (num4 << 4)) & 0xFFFF
return num1
def rx_process(self, packet, debug=DEBUG):
if debug:
print('Processing packet: %s' % packet.hex())
crc_rx = (packet[-1] << 8) | packet[-2]
if self.calc_crc(packet[0:-2]) != crc_rx:
print('Bad CRC received, resetting state')
self.bl_state = self.blfsm['BlIdle']
else:
cmd = bytes([packet[0]])
payload = packet[1:-2]
if self.bl_state == self.blfsm['BlVersion']:
if cmd == self.cmd['CmdGetVer']:
print('Found device ID: %s' % payload.decode().split('\x00')[0])
print('Erasing device... ', end='')
self.tx_packet(self.cmd['CmdErase'])
self.bl_state = self.blfsm['BlErase']
else:
print('ERROR: Expected response code CmdGetVer, got %s' % packet[0])
elif self.bl_state == self.blfsm['BlErase']:
if cmd == self.cmd['CmdErase']:
print('OKAY')
self.hex_line = 1
self.fw_file = open(self.fw_name, 'r')
tx = bytearray(self.cmd['CmdWrite'])
hex_line = bytes.fromhex(self.fw_file.readline().rstrip()[1:])
tx += hex_line
print('Writing firmware %d/%d... ' % (self.hex_line, self.hex_nline), end='')
self.tx_packet(tx)
self.bl_state = self.blfsm['BlWrite']
else:
print('ERROR: Expected response code CmdErase, got %s' % packet[0])
elif self.bl_state == self.blfsm['BlWrite']:
if cmd == self.cmd['CmdWrite']:
print('OKAY')
self.hex_line = self.hex_line + 1
# hex_line starts at 1, so we need to send up to and
# including hex_nline
if self.hex_line > self.hex_nline:
print('Update complete, booting firmware')
self.bl_state = self.blfsm['BlJump']
self.tx_packet(self.cmd['JumpApp'])
button_state()
return
# There doesn't seem to be a response to the JumpApp
# command, so at this point we're done.
self.running = False
else:
tx = bytearray(self.cmd['CmdWrite'])
hex_line = bytes.fromhex(self.fw_file.readline().rstrip()[1:])
tx += hex_line
print('Writing firmware %d/%d... ' % (self.hex_line, self.hex_nline), end='')
self.tx_packet(tx)
else:
print('ERROR: Expected response code CmdWrite, got %s' % packet[0])
def rx_buffer(self, b, debug=DEBUG):
state_begin = self.rx_state
if self.rx_state == self.rxfsm['RxIdle']:
# Ignore bytes until we see SOH
if b == self.ctrl['SOH']:
self.rxbuf = bytearray()
self.rx_state = self.rxfsm['RxBuffer']
elif self.rx_state == self.rxfsm['RxBuffer']:
if b == self.ctrl['DLE']:
# Escape the next control sequence
self.rx_state = self.rxfsm['RxEscape']
elif b == self.ctrl['EOT']:
# End of transmission
self.rx_state = self.rxfsm['RxIdle']
self.rx_process(self.rxbuf)
else:
# Buffer the byte
self.rxbuf += b
elif self.rx_state == self.rxfsm['RxEscape']:
# Unconditionally buffer any byte following the escape sequence
self.rxbuf += b
self.rx_state = self.rxfsm['RxBuffer']
else:
# Shouldn't get here
print('Unknown state')
self.rx_state = self.rxfsm['RxIdle']
if debug:
keys = list(self.rxfsm.keys())
vals = list(self.rxfsm.values())
s0 = vals.index(state_begin)
s1 = vals.index(self.rx_state)
print('RX: %s, RX FSM state: %s -> %s' % (b.hex(), keys[s0], keys[s1]))
def rx(self):
while running:
if self.serial:
b = self.serial.read(1)
if b:
self.rx_buffer(b)
else:
print('RX timeout?')
else:
print('Lost serial port')
time.sleep(1)
def tx(self, b, debug=DEBUG):
if debug:
print('TX: %s' % b.hex())
if self.serial and self.serial.is_open:
try:
self.serial.write(b)
self.serial.flush()
except:
print('TX failure')
button_state()
return
else:
print('TX failure, serial port not writeable')
button_state()
return
def tx_packet(self, b):
# b should be a bytearray
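# Frame layout produced below: SOH, then payload + CRC (low byte first)
# with any SOH/EOT/DLE bytes escaped by a preceding DLE, then a closing EOT.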
crc = self.calc_crc(b)
b += bytes([crc & 0xFF])
b += bytes([(crc >> 8) & 0xFF])
b_tx = bytearray(self.ctrl['SOH'])
for bb in b:
bb = bytes([bb])
# Escape any control characters that appear in the TX buffer
if bb == self.ctrl['SOH'] or bb == self.ctrl['EOT'] or bb == self.ctrl['DLE']:
b_tx += self.ctrl['DLE']
b_tx += bb
b_tx += self.ctrl['EOT']
self.tx(b_tx)
def __init__(self, fw_name=None, port=None):
self.rx_state = self.rxfsm['RxIdle']
self.bl_state = self.blfsm['BlIdle']
self.fw_name = fw_name
self.hex_nline = 0
self.hex_line = 0
# Ensure the file exists, has valid Intel Hex checksums, and count lines
try:
with open(self.fw_name) as fw_file:
for line in fw_file:
self.hex_nline = self.hex_nline + 1
line = line.rstrip()[1:]
try:
checksum = bytes.fromhex(line[-2:])
except:
print('%s is not a valid hex file' % fw_name)
button_state()
return
# It seems to just load hex if it's blank
data = bytes.fromhex(line[:-2])
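# Intel HEX checksum: two's complement of the sum of the record's bytes,
# so (sum(data) + checksum) & 0xFF should equal 0.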
s = bytes([((~(sum(data) & 0xFF) & 0xFF) + 1) & 0xFF])
if checksum != s:
print('%s is not a valid hex file' % fw_name)
button_state()
return
except:
print('No file selected')
button_state()
return
comports = []
try:
if port == None:
comports_all = [comport for comport in serial.tools.list_ports.comports()]
for com in comports_all:
if com.manufacturer == 'FTDI':
comports.append(com.device)
else:
comports.append(port)
if comports:
if len(comports) > 1:
print('Several FTDI devices detected - not sure which to target. Aborting.')
# TODO: Add interactive device selector?
button_state()
return
for com in comports:
try:
self.serial = serial.Serial(com, baudrate=115200, timeout=None, rtscts=True)
print('Opened device at %s' % com)
except Exception as ex:
print('Could not open device at %s' % com)
print('Exception: %s' % ex)
button_state()
return
else:
print('No RetroTINK devices found')
button_state()
return
except:
print('No communication with device')
button_state()
return
if self.serial:
self.rx_process_thread = threading.Thread(target=self.rx, args=())
self.rx_process_thread.daemon = True
self.rx_process_thread.start()
self.timer_thread = threading.Thread(target=self.timer, args=(time.time() + 0.1,))
self.timer_thread.daemon = True
self.timer_thread.start()
else:
button_state()
return
self.running = True
retries = 1
self.bl_state = self.blfsm['BlVersion']
while retries and running:
retries = retries - 1
print('Probing device... ', end='')
self.tx_packet(self.cmd['CmdGetVer'])
time.sleep(1)
# Need to add a timeout
def file_select():
filetypes = (
('hex files', '*.hex'),
('All files', '*.*')
)
fw_filename = fd.askopenfilename(
title='Select hex',
initialdir='/',
filetypes=filetypes)
browse_box.configure(state="normal")
browse_box.delete(0, END)
browse_box.insert(0,fw_filename)
browse_box.configure(state="readonly")
def tink_flash():
fw_filename = browse_box.get()
try:
button_state()
tink = Tink(fw_name=fw_filename, port=COM_OVERRIDE)
except:
print('Could not execute flash')
button_state()
return
def button_state():
if browse_button['state'] == "normal":
browse_button.configure(state="disabled")
flash_button.configure(state="disabled")
else:
browse_button.configure(state="normal")
flash_button.configure(state="normal")
if __name__ == '__main__':
signal(SIGINT, sig_handler)
window = tkinter.Tk()
window.geometry('680x380')
window.iconbitmap(default='./assets/icon.ico')
window.title('tinkup-gui')
window.resizable(False,False)
window.eval('tk::PlaceWindow . center')
tink_logo = PhotoImage(file='./assets/RetroTINK-logo.png')
tink_logo = tink_logo.subsample(4,4)
tink_label = tkinter.Label(window,image=tink_logo)
tink_label.place(x=285, y=10)
fw_label = tkinter.Label(window,text="Hex File:")
fw_label.place(x=325, y=90)
browse_box = tkinter.Entry(window,textvariable=fw_filename)
browse_box.configure(state="readonly")
browse_box.place(x=10, y=120, width=582)
browse_button = tkinter.Button(window,text='Load HEX',command=file_select)
browse_button.place(x=610, y=115)
flash_button = tkinter.Button(window, text="Flash", command=tink_flash)
flash_button.place(x=330, y=145)
print_text = sd.ScrolledText(window, undo=True)
print_text.place(x=10, y=180, height=180)
logger = PrintLogger(print_text)
sys.stdout = logger
try:
from ctypes import windll
windll.shcore.SetProcessDpiAwareness(1)
finally:
window.mainloop()
on_closing()
```
|
{
"source": "JeromeEippers/pyside_maya_class",
"score": 4
}
|
#### File: JeromeEippers/pyside_maya_class/ControlFlow.py
```python
x = 2
print(x == 2)
print(x == 3)
print(x < 3)
print(x != 3)
print(x <> 3 )
print('John' in ["John", "Rick"])
print('Jeff' in ["John", "Rick"])
print('Jeff' not in ["John", "Rick"])
#--------------------- IF ---------------------------
#if else
x = 0
if x < 0:
print 'negative'
else:
print 'positive'
#if elif else
x = 0
if x < 0:
print 'negative'
elif x == 0:
print 'zero'
elif x == 1:
print 'one'
else:
print 'big number'
#and or
name = "John"
age = 23
if name == "John" and age == 23:
print("Your name is John, and you are also 23 years old.")
if name == "John" or name == "Rick":
print("Your name is either John or Rick.")
#empty list is tested as False
myList = []
if myList:
print('there is some values')
else:
print('there is no value')
myList = [1]
if myList:
print('there is some values')
else:
print('there is no value')
#EXERCISE 1
#Change the values of the 4 variables of this exercise so all tests return true
number = 10
second_number = 2
first_array = []
second_array = [1,2,3]
if number > 15:
print("1")
if first_array:
print("2")
if len(second_array) == 2:
print("3")
if len(first_array) + len(second_array) == 5:
print("4")
if first_array and first_array[0] == 1:
print("5")
if not second_number:
print("6")
#------
#--------------------- LOOPS ---------------------------
# Measure some strings:
words = ['cat', 'window', 'dog']
for w in words:
print w, len(w)
#range function
print( range(5) )
print( range(3,9) )
print( range(3,9,2) )
for x in range(5):
print x
#mix range and len function to loop over indexes of list
a = ['Mary', 'had', 'a', 'little', 'lamb']
for i in range(len(a)):
print i, a[i]
#break
myList = list()
for x in range(10):
myList.append( x )
if x == 5:
break
print myList
#continue
myList = list()
for x in range(10):
myList.append( x )
if x == 5:
continue
print myList
#EXERCISE 2
#loop on this list and print all the even numbers but stop at the number 237
numbers = [
951, 402, 984, 651, 360, 69, 408, 319, 601, 485, 980, 507, 725, 547, 544,
615, 83, 165, 141, 501, 263, 617, 865, 575, 219, 390, 984, 592, 236, 105, 942, 941,
386, 462, 47, 418, 907, 344, 236, 375, 823, 566, 597, 978, 328, 615, 953, 345,
399, 162, 758, 219, 918, 237, 412, 566, 826, 248, 866, 950, 626, 949, 687, 217,
815, 67, 104, 58, 512, 24, 892, 894, 767, 553, 81, 379, 843, 831, 445, 742, 717,
958, 609, 842, 451, 688, 753, 854, 685, 93, 857, 440, 380, 126, 721, 328, 753, 470,
743, 527
]
#>>> [402, 984, 360, 408, 980, 544, 390, 984, 592, 236, 942, 386, 462, 418, 344, 236, 566, 978, 328, 162, 758, 918]
#loop and dictionaries
myDict = {1:"Aa" , 2:"Bb", 3:"Cc"}
for key in myDict.keys():
print key
myDict = {1:"Aa" , 2:"Bb", 3:"Cc"}
for value in myDict.values():
print value
myDict = {'first': 100, 'second': 'YES', 5: 100, 6: 'NO'}
for key, value in myDict.iteritems():
print key, value
#--------------------- FUNCTIONS ---------------------------
def my_function():
print("Hello From My Function!")
def my_function_with_args(username, greeting):
print("Hello, %s , From My Function!, I wish you %s"%(username, greeting))
def sum_two_numbers(a, b):
return a + b
# print(a simple greeting)
my_function()
#prints - "Hello, <NAME>, From My Function!, I wish you a great year!"
my_function_with_args("<NAME>", "a great year!")
# after this line x will hold the value 3!
x = sum_two_numbers(1,2)
print x
#default argument ------
def parrot(voltage, state='a stiff', action='voom', type='Norwegian Blue'):
print "-- This parrot wouldn't", action,
print "if you put", voltage, "volts through it."
print "-- Lovely plumage, the", type
print "-- It's", state, "!"
parrot(1000) # 1 positional argument
parrot(voltage=1000) # 1 keyword argument
parrot(voltage=1000000, action='VOOOOOM') # 2 keyword arguments
parrot(action='VOOOOOM', voltage=1000000) # 2 keyword arguments
parrot('a million', 'bereft of life', 'jump') # 3 positional arguments
parrot('a thousand', state='pushing up the daisies') # 1 positional, 1 keyword
#multiple return and unpack
def myFunction():
return 10 , 20
x, y = myFunction()
print x
print y
#EXERCISE 3
#Modify this function to return a list of names
def get_name_list():
pass
#Modify this function to return the name concatenated with the sentence 'was here'
def get_sentence( name ):
pass
def test_exercice():
for name in get_name_list():
print( get_sentence( name ) )
test_exercice()
```
#### File: pyside_maya_class/fbx_exporter/exporter.py
```python
from PySide2 import QtWidgets, QtGui
import maya.cmds as cmds
import maya.mel as mel
import os
from maya.app.general.mayaMixin import MayaQWidgetDockableMixin
class GroupText(QtWidgets.QWidget):
"""Group text create a line edit inside a groupbox"""
def __init__(self, label, text="", placeHolderText="", parent=None):
super(GroupText, self).__init__(parent)
layV = QtWidgets.QVBoxLayout(self)
self.setLayout(layV)
layV.setContentsMargins(0,0,0,0)
grp = QtWidgets.QGroupBox(label, self)
layV.addWidget(grp)
layH = QtWidgets.QHBoxLayout(self)
layH.setContentsMargins(2,0,2,0)
grp.setLayout(layH)
self._line = QtWidgets.QLineEdit(text, self)
self._line.setPlaceholderText(placeHolderText)
layH.addWidget(self._line,100)
layV.addStretch()
def line(self):
return self._line
class AbstractPickerWidget(QtWidgets.QWidget):
"""Base class for a picker
You have to implement the onClicked method"""
def __init__(self, label, text="", placeHolderText="", btnText="", iconBtn="", parent=None):
super(AbstractPickerWidget, self).__init__(parent)
layV = QtWidgets.QVBoxLayout(self)
self.setLayout(layV)
layV.setContentsMargins(0,0,0,0)
grp = QtWidgets.QGroupBox(label, self)
layV.addWidget(grp)
layH = QtWidgets.QHBoxLayout(self)
layH.setContentsMargins(2,0,2,0)
grp.setLayout(layH)
self._line = QtWidgets.QLineEdit(text, self)
self._line.setPlaceholderText(placeHolderText)
layH.addWidget(self._line,100)
if iconBtn:
icon = QtGui.QIcon(iconBtn)
btn = QtWidgets.QPushButton(icon, btnText)
else:
btn = QtWidgets.QPushButton(btnText)
btn.clicked.connect( self.onClicked )
layH.addWidget(btn,1)
layV.addStretch()
def line(self):
return self._line
def onClicked(self):
pass
class MayaPickerWidget( AbstractPickerWidget ):
"""Maya picker widget
This allows you to quickly pick an object in Maya"""
def __init__(self, label, text="", placeHolderText="", btnText="", iconBtn="", parent=None):
placeHolderText= placeHolderText or "Pick Maya Object"
btnText = btnText or "<"
super(MayaPickerWidget, self).__init__(label, text, placeHolderText, btnText, iconBtn, parent)
def onClicked(self):
selection = cmds.ls(sl=True)
if selection:
self._line.setText(selection[0])
class FolderPickerWidget( AbstractPickerWidget ):
"""Folder picker widget
pick a folder on the disk"""
def __init__(self, label, text="", placeHolderText="", btnText="", iconBtn="", parent=None):
iconBtn = iconBtn or ":/folder-closed.png"
placeHolderText= placeHolderText or "Select Folder"
super(FolderPickerWidget, self).__init__(label, text, placeHolderText, btnText, iconBtn, parent)
def onClicked(self):
dir = QtWidgets.QFileDialog.getExistingDirectory( self, "Select Folder" )
self._line.setText(dir)
class MyWindow(MayaQWidgetDockableMixin, QtWidgets.QWidget):
"""Main exporter widget"""
SAVE_OBJ = "exporter_options"
def __init__(self, parent=None):
"""Set the layout"""
super(MyWindow, self).__init__(parent)
self.setWindowTitle("Exporter")
lay = QtWidgets.QVBoxLayout(self)
self.rootFolder = FolderPickerWidget("Export Folder")
lay.addWidget(self.rootFolder)
self.rigName = GroupText("Rig Name")
self.rigName.line().textChanged.connect(self._canExportValidator)
lay.addWidget(self.rigName)
self.exportRig = QtWidgets.QGroupBox("Export Rig")
self.exportRig.setCheckable(True)
self.exportRig.setChecked(False)
self.exportRig.toggled.connect(self._canExportValidator)
groupLayout = QtWidgets.QVBoxLayout(self)
self.exportRig.setLayout(groupLayout)
lay.addWidget(self.exportRig)
self.rigRoot = MayaPickerWidget("Rig Root Object")
self.rigRoot.line().textChanged.connect(self._canExportValidator)
groupLayout.addWidget(self.rigRoot)
self.exportAnim = QtWidgets.QGroupBox("Export Animation")
self.exportAnim.setCheckable(True)
self.exportAnim.toggled.connect(self._canExportValidator)
groupLayout = QtWidgets.QVBoxLayout(self)
self.exportAnim.setLayout(groupLayout)
lay.addWidget(self.exportAnim)
self.animName = GroupText("Animation Name")
self.animName.line().textChanged.connect(self._canExportValidator)
groupLayout.addWidget(self.animName)
self.animRoot = MayaPickerWidget("Anim Root Object")
self.animRoot.line().textChanged.connect(self._canExportValidator)
groupLayout.addWidget(self.animRoot)
self.startFrame = GroupText("start frame", text="0")
self.startFrameValidator = QtGui.QIntValidator()
self.startFrame.line().setValidator(self.startFrameValidator)
self.startFrame.line().textChanged.connect(self._canExportValidator)
groupLayout.addWidget(self.startFrame)
self.endFrame = GroupText("end frame", text="30")
self.endFrameValidator = QtGui.QIntValidator()
self.endFrame.line().setValidator(self.endFrameValidator)
self.endFrame.line().textChanged.connect(self._canExportValidator)
groupLayout.addWidget(self.endFrame)
lay.addStretch(2000)
buttonsLayout = QtWidgets.QHBoxLayout(self)
lay.addLayout(buttonsLayout)
self.saveBtn = QtWidgets.QPushButton("Save")
self.saveBtn.clicked.connect(self.onSave)
buttonsLayout.addWidget(self.saveBtn)
self.exportBtn = QtWidgets.QPushButton("EXPORT")
self.exportBtn.clicked.connect(self.onExport)
buttonsLayout.addWidget(self.exportBtn,100)
self._canExportValidator()
def _canExportValidator(self, event=None):
"""call back on all modification, so we can enable the export button"""
self.exportBtn.setEnabled(self.canExport())
def options(self):
"""return the options of the tool as a dictionary"""
return {
'export_folder' : ( str( self.rootFolder.line().text() )).replace('\\', '/'),
'rig_name' : str( self.rigName.line().text() ),
'export_rig' : self.exportRig.isChecked(),
'rig_root' : str( self.rigRoot.line().text() ),
'export_animation' : self.exportAnim.isChecked(),
'animation_name' : str( self.animName.line().text() ),
'animation_root' : str( self.animRoot.line().text() ),
'startFrame' : int( self.startFrame.line().text() ),
'endFrame' : int( self.endFrame.line().text() )
}
def canExport(self, options=None):
"""check if we can export something using the options from the tool, or from a specific set of options"""
options = options or self.options()
if options['export_folder'] == '' or options['rig_name'] == '':
return False
if options['export_rig'] == False and options['export_animation'] == False:
return False
if options['export_rig'] == True:
if options['rig_root'] == '':
return False
if options['export_animation'] == True:
if options['animation_name'] == '' or options['animation_root'] == '':
return False
return True
def export_fbx(self, path, root, startFrame=0, endFrame=0):
"""Export an FBX file"""
cmds.select( root )
mel.eval('FBXExportAnimationOnly -v false')
mel.eval('FBXExportBakeComplexAnimation -v true')
mel.eval('FBXExportBakeComplexStart -v {0}'.format(startFrame))
mel.eval('FBXExportBakeComplexEnd -v {0}'.format(endFrame))
mel.eval('FBXExportBakeResampleAnimation -v true')
mel.eval('FBXExportConstraints -v false')
mel.eval('FBXExportInputConnections -v false')
mel.eval('FBXExportSkeletonDefinitions -v true')
mel.eval('FBXExportSkins -v true')
mel.eval('FBXExport -f "{0}" -s'.format( path.replace('\\', '/') ) )
def onExport(self, options=None):
"""export using the options from the tool, or from a specific set of options"""
options = options or self.options()
if self.canExport( options ):
folder = os.path.normpath ( options['export_folder'] )
if options['export_rig']:
path = os.path.join(folder, options['rig_name'] + '.fbx')
self.export_fbx(path, options['rig_root'])
if options['export_animation']:
path = os.path.join(folder, options['rig_name'] + '@' + options['animation_name'] + '.fbx')
self.export_fbx(path, options['animation_root'], options['startFrame'], options['endFrame'])
def onSave(self):
"""saving the options in the file"""
if cmds.objExists( self.SAVE_OBJ ):
cmds.delete(self.SAVE_OBJ)
node = cmds.createNode('transform', name=self.SAVE_OBJ)
cmds.addAttr( node, longName='options', dt='string' )
cmds.setAttr( node + '.options', str(self.options()), type='string' )
win = MyWindow()
win.show(dockable=True)
```
#### File: pyside_maya_class/maya_devkit_scripts/cacheFileConverter.py
```python
import os
import os.path
import getopt
import sys
import xml.dom.minidom
import string
import re
import array
"""
This example shows how to convert float channels found in cache files in Maya 8.5 and later to
double channels, so that the cache file would then be compatible with the
geometry cache in Maya 8.0. It parses the XML file in addition to the cache data files and
handles caches that are one file per frame as well as one file.
To use:
python cacheFileConverter.py -f mayaCacheFile.xml -o outputFileName
Overview of Maya Caches:
========================
Conceptually, a Maya cache consists of 1 or more channels of data.
Each channel has a number of properties, such as:
- start/end time
- data type of the channel (eg. "DoubleVectorArray" to represents a point array)
- interpretation (eg. "positions" the vector array represents position data, as opposed to per vertex normals, for example)
- sampling type (eg. "regular" or "irregular")
- sampling rate (meaningful only if sampling type is "regular")
Each channel has a number of data points in time, not necessarily regularly spaced,
and not necessarily co-incident in time with data in other channels.
At the highest level, a Maya cache is simply made up of channels and their data in time.
On disk, the Maya cache is made up of a XML description file, and 1 or more data files.
The description file provides a high level overview of what the cache contains,
such as the cache type (one file, or one file per frame), channel names, interpretation, etc.
The data files contain the actual data for the channels.
In the case of one file per frame, a naming convention is used so the cache can check its
available data at runtime.
Here is a visualization of the data format of the OneFile case:
// |---CACH (Group) // Header
// | |---VRSN // Version Number (char*)
// | |---STIM // Start Time of the Cache File (int)
// | |---ETIM // End Time of the Cache File (int)
// |
// |---MYCH (Group) // 1st Time
// | |---TIME // Time (int)
// | |---CHNM // 1st Channel Name (char*)
// | |---SIZE // 1st Channel Size
// | |---DVCA // 1st Channel Data (Double Vector Array)
// | |---CHNM // n-th Channel Name
// | |---SIZE // n-th Channel Size
// | |---DVCA // n-th Channel Data (Double Vector Array)
// | |..
// |
// |---MYCH (Group) // 2nd Time
// | |---TIME // Time
// | |---CHNM // 1st Channel Name
// | |---SIZE // 1st Channel Size
// | |---DVCA // 1st Channel Data (Double Vector Array)
// | |---CHNM // n-th Channel Name
// | |---SIZE // n-th Channel Size
// | |---DVCA // n-th Channel Data (Double Vector Array)
// | |..
// |
// |---..
// |
//
In a multiple file caches, the only difference is that after the
header "CACH" group, there is only one MYCH group and there is no
TIME chunk. In the case of one file per frame, the time is part of
the file name - allowing Maya to scan at run time to see what data
is actually available, and it allows users to move data in time by
manipulating the file name.
Note that it is not necessary to have data for every channel at every time.
"""
class CacheChannel:
m_channelName = ""
m_channelType = ""
m_channelInterp = ""
m_sampleType = ""
m_sampleRate = 0
m_startTime = 0
m_endTime = 0
def __init__(self,channelName,channelType,interpretation,samplingType,samplingRate,startTime,endTime):
self.m_channelName = channelName
self.m_channelType = channelType
self.m_channelInterp = interpretation
self.m_sampleType = samplingType
self.m_sampleRate = samplingRate
self.m_startTime = startTime
self.m_endTime = endTime
class CacheFile:
m_baseFileName = ""
m_directory = ""
m_fullPath = ""
m_cacheType = ""
m_cacheStartTime = 0
m_cacheEndTime = 0
m_timePerFrame = 0
m_version = 0.0
m_channels = []
########################################################################
# Description:
# Class constructor - tries to figure out full path to cache
# xml description file before calling parseDescriptionFile()
#
def __init__(self,fileName):
# fileName can be the full path to the .xml description file,
# or just the filename of the .xml file, with or without extension
# if it is in the current directory
dir = os.path.dirname(fileName)
fullPath = ""
if dir == "":
currDir = os.getcwd()
fullPath = os.path.join(currDir,fileName)
if not os.path.exists(fullPath):
fileName = fileName + '.xml';
fullPath = os.path.join(currDir,fileName)
if not os.path.exists(fullPath):
print "Sorry, can't find the file %s to be opened\n" % fullPath
sys.exit(2)
else:
fullPath = fileName
self.m_baseFileName = os.path.basename(fileName).split('.')[0]
self.m_directory = os.path.dirname(fullPath)
self.m_fullPath = fullPath
self.parseDescriptionFile(fullPath)
########################################################################
# Description:
# Writes a converted description file, where all instances of "FloatVectorArray"
# are replaced with "DoubleVectorArray"
#
def writeConvertedDescriptionFile(self,outputFileName):
newXmlFileName = outputFileName + ".xml"
newXmlFullPath = os.path.join(self.m_directory,newXmlFileName)
fd = open(self.m_fullPath,"r")
fdOut = open(newXmlFullPath,"w")
lines = fd.readlines()
for line in lines:
if line.find("FloatVectorArray") >= 0:
line = line.replace("FloatVectorArray","DoubleVectorArray")
fdOut.write(line)
########################################################################
# Description:
# Given the full path to the xml cache description file, this
# method parses its contents and sets the relevant member variables
#
def parseDescriptionFile(self,fullPath):
dom = xml.dom.minidom.parse(fullPath)
root = dom.getElementsByTagName("Autodesk_Cache_File")
allNodes = root[0].childNodes
for node in allNodes:
if node.nodeName == "cacheType":
self.m_cacheType = node.attributes.item(0).nodeValue
if node.nodeName == "time":
timeRange = node.attributes.item(0).nodeValue.split('-')
self.m_cacheStartTime = int(timeRange[0])
self.m_cacheEndTime = int(timeRange[1])
if node.nodeName == "cacheTimePerFrame":
self.m_timePerFrame = int(node.attributes.item(0).nodeValue)
if node.nodeName == "cacheVersion":
self.m_version = float(node.attributes.item(0).nodeValue)
if node.nodeName == "Channels":
self.parseChannels(node.childNodes)
########################################################################
# Description:
# helper method to extract channel information
#
def parseChannels(self,channels):
for channel in channels:
if re.compile("channel").match(channel.nodeName) != None :
channelName = ""
channelType = ""
channelInterp = ""
sampleType = ""
sampleRate = 0
startTime = 0
endTime = 0
for index in range(0,channel.attributes.length):
attrName = channel.attributes.item(index).nodeName
if attrName == "ChannelName":
channelName = channel.attributes.item(index).nodeValue
if attrName == "ChannelInterpretation":
channelInterp = channel.attributes.item(index).nodeValue
if attrName == "EndTime":
endTime = int(channel.attributes.item(index).nodeValue)
if attrName == "StartTime":
startTime = int(channel.attributes.item(index).nodeValue)
if attrName == "SamplingRate":
sampleRate = int(channel.attributes.item(index).nodeValue)
if attrName == "SamplingType":
sampleType = channel.attributes.item(index).nodeValue
if attrName == "ChannelType":
channelType = channel.attributes.item(index).nodeValue
channelObj = CacheChannel(channelName,channelType,channelInterp,sampleType,sampleRate,startTime,endTime)
self.m_channels.append(channelObj)
def fileFormatError():
print "Error: unable to read cache format\n";
sys.exit(2)
def readInt(fd,needSwap):
    intArray = array.array('i')  # 'i' is always 4 bytes; 'l' is 8 bytes on 64-bit Unix
intArray.fromfile(fd,1)
if needSwap:
intArray.byteswap()
return intArray[0]
def writeInt(fd,outInt,needSwap):
    intArray = array.array('i')  # 'i' is always 4 bytes; 'l' is 8 bytes on 64-bit Unix
intArray.insert(0,outInt)
if needSwap:
intArray.byteswap()
intArray.tofile(fd)
########################################################################
# Description:
# method to parse and convert the contents of the data file, for the
# One large file case ("OneFile")
def parseDataOneFile(cacheFile,outFileName):
dataFilePath = os.path.join(cacheFile.m_directory,cacheFile.m_baseFileName)
dataFileNameOut = outFileName + ".mc"
dataFilePathOut = os.path.join(cacheFile.m_directory,dataFileNameOut)
dataFilePath = dataFilePath + ".mc"
if not os.path.exists(dataFilePath):
print "Error: unable to open cache data file at %s\n" % dataFilePath
sys.exit(2)
fd = open(dataFilePath,"rb")
fdOut = open(dataFilePathOut,"wb")
blockTag = fd.read(4)
fdOut.write(blockTag)
#blockTag must be FOR4
if blockTag != "FOR4":
fileFormatError()
platform = sys.platform
needSwap = False
if re.compile("win").match(platform) != None :
needSwap = True
if re.compile("linux").match(platform) != None :
needSwap = True
offset = readInt(fd,needSwap)
writeInt(fdOut,offset,needSwap)
#The 1st block is the header, not used.
#just write out as is
header = fd.read(offset)
fdOut.write(header)
while True:
#From now on the file is organized in blocks of time
#Each block holds the data for all the channels at that
#time
blockTag = fd.read(4)
fdOut.write(blockTag)
if blockTag == "":
#EOF condition...we are done
return
if blockTag != "FOR4":
fileFormatError()
blockSize = readInt(fd,needSwap)
#We cannot just write out the old block size, since we are potentially converting
#Float channels to doubles, the block size may increase.
newBlockSize = 0
bytesRead = 0
#Since we don't know the size of the block yet, we will cache everything in a dictionary,
#and write everything out in the end.
blockContents = {}
mychTag = fd.read(4)
if mychTag != "MYCH":
fileFormatError()
bytesRead += 4
blockContents['mychTag'] = mychTag
timeTag = fd.read(4)
if timeTag != "TIME":
fileFormatError()
bytesRead += 4
blockContents['timeTag']= timeTag
#Next 32 bit int is the size of the time variable,
#this is always 4
timeVarSize = readInt(fd,needSwap)
bytesRead += 4
blockContents['timeVarSize']= timeVarSize
#Next 32 bit int is the time itself, in ticks
#1 tick = 1/6000 of a second
time = readInt(fd,needSwap)
bytesRead += 4
blockContents['time']= time
newBlockSize = bytesRead
channels = []
blockContents['channels'] = channels
print "Converting Data found at time %f seconds...\n"%(time/6000.0)
while bytesRead < blockSize:
channelContents = {}
#channel name is next.
#the tag for this must be CHNM
chnmTag = fd.read(4)
if chnmTag != "CHNM":
fileFormatError()
bytesRead += 4
newBlockSize += 4
channelContents['chnmTag'] = chnmTag
#Next comes a 32 bit int that tells us how long the
#channel name is
chnmSize = readInt(fd,needSwap)
bytesRead += 4
newBlockSize += 4
channelContents['chnmSize'] = chnmSize
#The string is padded out to 32 bit boundaries,
#so we may need to read more than chnmSize
mask = 3
chnmSizeToRead = (chnmSize + mask) & (~mask)
channelName = fd.read(chnmSize)
paddingSize = chnmSizeToRead-chnmSize
channelContents['channelName'] = channelName
channelContents['paddingSize'] = paddingSize
if paddingSize > 0:
padding = fd.read(paddingSize)
channelContents['padding'] = padding
bytesRead += chnmSizeToRead
newBlockSize += chnmSizeToRead
#Next is the SIZE field, which tells us the length
#of the data array
sizeTag = fd.read(4)
channelContents['sizeTag'] = sizeTag
if sizeTag != "SIZE":
fileFormatError()
bytesRead += 4
newBlockSize += 4
#Next 32 bit int is the size of the array size variable,
#this is always 4, so we'll ignore it for now
#though we could use it as a sanity check.
arrayVarSize = readInt(fd,needSwap)
bytesRead += 4
newBlockSize += 4
channelContents['arrayVarSize'] = arrayVarSize
#finally the actual size of the array:
arrayLength = readInt(fd,needSwap)
bytesRead += 4
newBlockSize += 4
channelContents['arrayLength'] = arrayLength
#data format tag:
dataFormatTag = fd.read(4)
#buffer length - how many bytes is the actual data
bufferLength = readInt(fd,needSwap)
bytesRead += 8
newBlockSize += 8
numPointsToPrint = 5
if dataFormatTag == "FVCA":
#FVCA == Float Vector Array
outDataTag = "DVCA"
channelContents['dataFormatTag'] = outDataTag
if bufferLength != arrayLength*3*4:
fileFormatError()
outBufLength = bufferLength*2
channelContents['bufferLength'] = outBufLength
floatArray = array.array('f')
floatArray.fromfile(fd,arrayLength*3)
doubleArray = array.array('d')
bytesRead += arrayLength*3*4
newBlockSize += arrayLength*3*8
if needSwap:
floatArray.byteswap()
for index in range(0,arrayLength*3):
doubleArray.append(floatArray[index])
if needSwap:
doubleArray.byteswap()
channelContents['doubleArray'] = doubleArray
channels.append(channelContents)
elif dataFormatTag == "DVCA":
#DVCA == Double Vector Array
channelContents['dataFormatTag'] = dataFormatTag
if bufferLength != arrayLength*3*8:
fileFormatError()
channelContents['bufferLength'] = bufferLength
doubleArray = array.array('d')
doubleArray.fromfile(fd,arrayLength*3)
channelContents['doubleArray'] = doubleArray
channels.append(channelContents)
bytesRead += arrayLength*3*8
newBlockSize += arrayLength*3*8
else:
fileFormatError()
#Now that we have completely parsed this block, we are ready to output it
writeInt(fdOut,newBlockSize,needSwap)
fdOut.write(blockContents['mychTag'])
fdOut.write(blockContents['timeTag'])
writeInt(fdOut,blockContents['timeVarSize'],needSwap)
writeInt(fdOut,blockContents['time'],needSwap)
for channelContents in channels:
fdOut.write(channelContents['chnmTag'])
writeInt(fdOut,channelContents['chnmSize'],needSwap)
fdOut.write(channelContents['channelName'])
if channelContents['paddingSize'] > 0:
fdOut.write(channelContents['padding'])
fdOut.write(channelContents['sizeTag'])
writeInt(fdOut,channelContents['arrayVarSize'],needSwap)
writeInt(fdOut,channelContents['arrayLength'],needSwap)
fdOut.write(channelContents['dataFormatTag'])
writeInt(fdOut,channelContents['bufferLength'],needSwap)
channelContents['doubleArray'].tofile(fdOut)
########################################################################
# Description:
# method to parse and convert the contents of the data file, for the
# file per frame case ("OneFilePerFrame")
def parseDataFilePerFrame(cacheFile,outFileName):
allFilesInDir = os.listdir(cacheFile.m_directory)
matcher = re.compile(cacheFile.m_baseFileName)
dataFiles = []
for afile in allFilesInDir:
if os.path.splitext(afile)[1] == ".mc" and matcher.match(afile) != None:
dataFiles.append(afile)
for dataFile in dataFiles:
fileName = os.path.split(dataFile)[1]
baseName = os.path.splitext(fileName)[0]
frameAndTickNumberStr = baseName.split("Frame")[1]
frameAndTickNumber = frameAndTickNumberStr.split("Tick")
frameNumber = int(frameAndTickNumber[0])
tickNumber = 0
if len(frameAndTickNumber) > 1:
tickNumber = int(frameAndTickNumber[1])
timeInTicks = frameNumber*cacheFile.m_timePerFrame + tickNumber
print "--------------------------------------------------------------\n"
print "Converting data at time %f seconds:\n"%(timeInTicks/6000.0)
fd = open(dataFile,"rb")
dataFileOut = outFileName + "Frame" + frameAndTickNumberStr + ".mc"
dataFileOutPath = os.path.join(cacheFile.m_directory,dataFileOut)
fdOut = open(dataFileOutPath,"wb")
blockTag = fd.read(4)
#blockTag must be FOR4
if blockTag != "FOR4":
fileFormatError()
fdOut.write(blockTag)
platform = sys.platform
needSwap = False
if re.compile("win").match(platform) != None :
needSwap = True
if re.compile("linux").match(platform) != None :
needSwap = True
offset = readInt(fd,needSwap)
writeInt(fdOut,offset,needSwap)
#The 1st block is the header, not used.
#write out as is.
header = fd.read(offset)
fdOut.write(header)
blockTag = fd.read(4)
if blockTag != "FOR4":
fileFormatError()
fdOut.write(blockTag)
blockSize = readInt(fd,needSwap)
#We cannot just write out the old block size, since we are potentially converting
#Float channels to doubles, the block size may increase.
newBlockSize = 0
bytesRead = 0
#Since we don't know the size of the block yet, we will cache everything in a dictionary,
#and write everything out in the end.
blockContents = {}
mychTag = fd.read(4)
blockContents['mychTag'] = mychTag
if mychTag != "MYCH":
fileFormatError()
bytesRead += 4
#Note that unlike the oneFile case, for file per frame there is no
#TIME tag at this point. The time of the data is embedded in the
#file name itself.
newBlockSize = bytesRead
channels = []
blockContents['channels'] = channels
while bytesRead < blockSize:
channelContents = {}
#channel name is next.
#the tag for this must be CHNM
chnmTag = fd.read(4)
if chnmTag != "CHNM":
fileFormatError()
bytesRead += 4
newBlockSize += 4
channelContents['chnmTag'] = chnmTag
#Next comes a 32 bit int that tells us how long the
#channel name is
chnmSize = readInt(fd,needSwap)
bytesRead += 4
newBlockSize += 4
channelContents['chnmSize'] = chnmSize
#The string is padded out to 32 bit boundaries,
#so we may need to read more than chnmSize
mask = 3
chnmSizeToRead = (chnmSize + mask) & (~mask)
channelName = fd.read(chnmSize)
paddingSize = chnmSizeToRead-chnmSize
channelContents['channelName'] = channelName
channelContents['paddingSize'] = paddingSize
if paddingSize > 0:
padding = fd.read(paddingSize)
channelContents['padding'] = padding
bytesRead += chnmSizeToRead
newBlockSize += chnmSizeToRead
#Next is the SIZE field, which tells us the length
#of the data array
sizeTag = fd.read(4)
channelContents['sizeTag'] = sizeTag
if sizeTag != "SIZE":
fileFormatError()
bytesRead += 4
newBlockSize += 4
#Next 32 bit int is the size of the array size variable,
#this is always 4, so we'll ignore it for now
#though we could use it as a sanity check.
arrayVarSize = readInt(fd,needSwap)
bytesRead += 4
newBlockSize += 4
channelContents['arrayVarSize'] = arrayVarSize
#finally the actual size of the array:
arrayLength = readInt(fd,needSwap)
bytesRead += 4
newBlockSize += 4
channelContents['arrayLength'] = arrayLength
#data format tag:
dataFormatTag = fd.read(4)
#buffer length - how many bytes is the actual data
bufferLength = readInt(fd,needSwap)
bytesRead += 8
newBlockSize += 8
numPointsToPrint = 5
if dataFormatTag == "FVCA":
#FVCA == Float Vector Array
outDataTag = "DVCA"
channelContents['dataFormatTag'] = outDataTag
if bufferLength != arrayLength*3*4:
fileFormatError()
outBufLength = bufferLength*2
channelContents['bufferLength'] = outBufLength
floatArray = array.array('f')
floatArray.fromfile(fd,arrayLength*3)
bytesRead += arrayLength*3*4
newBlockSize += arrayLength*3*8
doubleArray = array.array('d')
if needSwap:
floatArray.byteswap()
for index in range(0,arrayLength*3):
doubleArray.append(floatArray[index])
if needSwap:
doubleArray.byteswap()
channelContents['doubleArray'] = doubleArray
channels.append(channelContents)
elif dataFormatTag == "DVCA":
#DVCA == Double Vector Array
channelContents['dataFormatTag'] = dataFormatTag
if bufferLength != arrayLength*3*8:
fileFormatError()
channelContents['bufferLength'] = bufferLength
doubleArray = array.array('d')
doubleArray.fromfile(fd,arrayLength*3)
channelContents['doubleArray'] = doubleArray
channels.append(channelContents)
bytesRead += arrayLength*3*8
newBlockSize += arrayLength*3*8
else:
fileFormatError()
#Now that we have completely parsed this block, we are ready to output it
writeInt(fdOut,newBlockSize,needSwap)
fdOut.write(blockContents['mychTag'])
for channelContents in channels:
fdOut.write(channelContents['chnmTag'])
writeInt(fdOut,channelContents['chnmSize'],needSwap)
fdOut.write(channelContents['channelName'])
if channelContents['paddingSize'] > 0:
fdOut.write(channelContents['padding'])
fdOut.write(channelContents['sizeTag'])
writeInt(fdOut,channelContents['arrayVarSize'],needSwap)
writeInt(fdOut,channelContents['arrayLength'],needSwap)
fdOut.write(channelContents['dataFormatTag'])
writeInt(fdOut,channelContents['bufferLength'],needSwap)
channelContents['doubleArray'].tofile(fdOut)
def usage():
print "Use -f to indicate the cache description file (.xml) you wish to convert\nUse -o to indicate the output filename"
try:
(opts, args) = getopt.getopt(sys.argv[1:], "f:o:")
except getopt.error:
# print help information and exit:
usage()
sys.exit(2)
if len(opts) != 2:
usage()
sys.exit(2)
fileName = ""
outFileName = ""
for o,a in opts:
if o == "-f":
fileName = a
if o == "-o":
outFileName = a
cacheFile = CacheFile(fileName)
if cacheFile.m_version > 2.0:
print "Error: this script can only parse cache files of version 2 or lower\n"
sys.exit(2)
print "Outputing new description file...\n"
cacheFile.writeConvertedDescriptionFile(outFileName)
print "Beginning Conversion of data files...\n"
if cacheFile.m_cacheType == "OneFilePerFrame":
parseDataFilePerFrame(cacheFile,outFileName)
elif cacheFile.m_cacheType == "OneFile":
parseDataOneFile(cacheFile,outFileName)
else:
print "unknown cache type!\n"
```
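The module docstring above describes the on-disk chunk layout: a FOR4 header group followed by one MYCH group per time sample, each preceded by a 4-character tag and a 32-bit big-endian size. Below is a small standalone Python 3 sketch, independent of the converter, that only walks those top-level groups of a "OneFile" cache; the path is whatever `.mc` file you point it at:

```python
import struct

def dump_top_level_chunks(path):
    """List the top-level FOR4 groups of a Maya .mc cache data file.

    Hypothetical helper, not part of the devkit script; it follows the
    big-endian tag/size layout described in the docstring above.
    """
    with open(path, 'rb') as fd:
        if fd.read(4) != b'FOR4':
            raise ValueError('not a Maya cache data file')
        (header_size,) = struct.unpack('>I', fd.read(4))
        fd.seek(header_size, 1)              # skip the CACH header group
        while True:
            tag = fd.read(4)
            if not tag:                      # EOF
                break
            (block_size,) = struct.unpack('>I', fd.read(4))
            print(tag.decode('ascii'), block_size)
            fd.seek(block_size, 1)           # jump over this MYCH group
```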
#### File: pyside_maya_class/maya_pyside/003_button.py
```python
from PySide2 import QtWidgets
import maya.cmds as cmds
class MyWindow(QtWidgets.QDialog):
def __init__(self, parent=None):
super(MyWindow, self).__init__(parent)
layV = QtWidgets.QVBoxLayout(self)
label = QtWidgets.QLabel('This is rad')
layV.addWidget(label)
btn = QtWidgets.QPushButton('PRESS ME')
btn.clicked.connect( self.onClicked )
layV.addWidget(btn)
def onClicked(self):
cmds.polyCube()
win = MyWindow()
win.show()
```
#### File: pyside_maya_class/maya_pyside/005_main_window_dialog.py
```python
from PySide2 import QtWidgets
import shiboken2
import maya.OpenMayaUI as apiUI
def getMayaWindow():
"""
Get the main Maya window as a QtGui.QMainWindow instance
@return: QtGui.QMainWindow instance of the top level Maya windows
"""
ptr = apiUI.MQtUtil.mainWindow()
if ptr is not None:
return shiboken2.wrapInstance(long(ptr), QtWidgets.QWidget)
class MyWindow(QtWidgets.QDialog):
def __init__(self, parent=None):
super(MyWindow, self).__init__(parent)
lay = QtWidgets.QVBoxLayout(self)
label = QtWidgets.QLabel('label1')
lay.addWidget(label)
label = QtWidgets.QLabel('label2')
lay.addWidget(label)
win = MyWindow( getMayaWindow() )
win.show()
```
#### File: maya_pyside/exercice/001_layout.py
```python
from PySide2 import QtWidgets
class MyWindow (QtWidgets.QDialog):
def __init__(self, parent=None):
super(MyWindow, self).__init__(parent)
lay = QtWidgets.QVBoxLayout(self)
layH = QtWidgets.QHBoxLayout(self)
lay.addLayout(layH)
label = QtWidgets.QLabel('hello', self)
layH.addWidget(label)
label = QtWidgets.QLabel('world', self)
layH.addWidget(label)
label = QtWidgets.QLabel('this is rad', self)
lay.addWidget(label)
win = MyWindow()
win.show()
```
#### File: unityTool/dataType/Prefab.py
```python
from Component import Component
class Prefab(Component):
def __init__(self, fileId, data, file):
super(Prefab, self).__init__(fileId, data, file, 'Prefab')
def parentPrefabGuid(self):
return self._data['m_ParentPrefab']['guid']
def isPrefabParent(self):
return self._data['m_IsPrefabParent']
```
|
{
"source": "JeromeEippers/python_rnd_collection",
"score": 2
}
|
#### File: npk/animation_framework/fbxreader.py
```python
import sys
import numpy as np
from .skeleton import Skeleton, Bone
from . import posquat as pq
fbxsdkpath = r'D:\Software\fbx_python37_x64'
if fbxsdkpath not in sys.path:
sys.path.append(fbxsdkpath)
import FbxCommon as fb
import fbx
def find_mesh_node(pScene):
def _get_mesh(pNode):
if isinstance(pNode.GetNodeAttribute(), fbx.FbxMesh):
return pNode
for i in range(pNode.GetChildCount()):
ret = _get_mesh(pNode.GetChild(i))
if ret:
return ret
node = _get_mesh(pScene.GetRootNode())
if node :
return node
return None
def read_vertices_buffer(lMeshNode):
lMesh = lMeshNode.GetNodeAttribute()
lControlPointsCount = lMesh.GetControlPointsCount()
lControlPoints = lMesh.GetControlPoints()
m = lMeshNode.EvaluateGlobalTransform()
# 3pos, 3normal
vertexstride = 6
vertices = np.zeros((lControlPointsCount, vertexstride), dtype=np.float32)
for i in range(lControlPointsCount):
# get positions
vertices[i, :3] = list(m.MultT(lControlPoints[i]))[:3]
# get normals
for j in range(lMesh.GetLayerCount()):
leNormals = lMesh.GetLayer(j).GetNormals()
if leNormals:
if leNormals.GetMappingMode() == fbx.FbxLayerElement.eByControlPoint:
if leNormals.GetReferenceMode() == fbx.FbxLayerElement.eDirect:
vertices[i, 3:6] = list(m.MultT(leNormals.GetDirectArray().GetAt(i)))[:3]
return vertices
def read_index_buffer(lMeshNode):
lMesh = lMeshNode.GetNodeAttribute()
lPolygonCount = lMesh.GetPolygonCount()
    faces = np.zeros(lPolygonCount * 10, dtype=np.int32)  # np.int was removed in recent NumPy releases
arrayid = 0
for i in range(lPolygonCount):
lPolygonSize = lMesh.GetPolygonSize(i)
# retriangulate
for j in range(2, lPolygonSize):
faces[arrayid] = lMesh.GetPolygonVertex(i, j - 2)
arrayid += 1
faces[arrayid] = lMesh.GetPolygonVertex(i, j - 1)
arrayid += 1
faces[arrayid] = lMesh.GetPolygonVertex(i, j)
arrayid += 1
return faces[:arrayid]
def read_skeleton(pScene):
skeleton = Skeleton()
def _skel(pNode, pParent):
bone = Bone(pNode.GetName(), pParent)
if pParent > -1:
skeleton.bones[pParent].children.append(bone)
skeleton.bones.append(bone)
boneid = len(skeleton.bones) - 1
m = pNode.EvaluateGlobalTransform()
for i in range(4):
for j in range(4):
skeleton.bindpose[boneid, i, j] = m.Get(i, j)
skeleton.initialpose[boneid, i, j] = m.Get(i, j)
for i in range(pNode.GetChildCount()):
childnode = pNode.GetChild(i)
if isinstance(childnode.GetNodeAttribute(), fbx.FbxMesh) == False:
_skel(childnode, boneid)
lRootNode = pScene.GetRootNode()
_skel(lRootNode.GetChild(0), -1)
#add cop bone
cop = Bone('COP', 0)
skeleton.bones[0].children.append(cop)
skeleton.bones.append(cop)
skeleton.bindpose = skeleton.bindpose[:len(skeleton.bones), :, :]
skeleton.initialpose = skeleton.initialpose[:len(skeleton.bones), :, :]
skeleton.parentlist = [bone.parent for bone in skeleton.bones]
skeleton.upleglength = np.linalg.norm(skeleton.initialpose[skeleton.boneid('Model:LeftUpLeg'), 3, :3] -
skeleton.initialpose[skeleton.boneid('Model:LeftLeg'), 3, :3])
skeleton.leglength = np.linalg.norm(skeleton.initialpose[skeleton.boneid('Model:LeftLeg'), 3, :3] -
skeleton.initialpose[skeleton.boneid('Model:LeftFoot'), 3, :3])
skeleton.hipsid = skeleton.boneid('Model:Hips')
skeleton.leftlegids = [skeleton.boneid('Model:LeftUpLeg'),
skeleton.boneid('Model:LeftLeg'),
skeleton.boneid('Model:LeftFoot')]
skeleton.rightlegids = [skeleton.boneid('Model:RightUpLeg'),
skeleton.boneid('Model:RightLeg'),
skeleton.boneid('Model:RightFoot')]
skeleton.leftfootid = skeleton.leftlegids[-1]
skeleton.rightfootid = skeleton.rightlegids[-1]
skeleton.copid = skeleton.boneid('COP')
skeleton.bindpose[skeleton.copid, ...] = np.eye(4)
skeleton.initialpose[skeleton.copid, ...] = np.eye(4)
skeleton.localinitialpq = skeleton.global_to_local(pq.pose_to_pq(skeleton.initialpose))
return skeleton
def read_bindpose(lMeshNode, skeleton):
lMesh = lMeshNode.GetNodeAttribute()
skin = lMesh.GetDeformer(0,fbx.FbxDeformer.eSkin)
clustercount = skin.GetClusterCount()
for clusterid in range(clustercount):
cluster = skin.GetCluster(clusterid)
linkedNode = cluster.GetLink()
boneid = skeleton.boneid(linkedNode.GetName())
if boneid < 0:
raise Exception('bone {} not found in skeleton'.format(linkedNode.GetName()))
m = fbx.FbxAMatrix()
m = cluster.GetTransformLinkMatrix(m)
m = m.Inverse()
for i in range(4):
for j in range(4):
skeleton.bindpose[boneid,i,j] = m.Get(i,j)
def read_skinning(lMeshNode, skeleton):
lMesh = lMeshNode.GetNodeAttribute()
lControlPointsCount = lMesh.GetControlPointsCount()
weights = np.zeros([lControlPointsCount, 8])
indices = np.zeros([lControlPointsCount, 8], dtype=np.int32)
counts = np.zeros([lControlPointsCount], dtype=np.int32)
skin = lMesh.GetDeformer(0, fbx.FbxDeformer.eSkin)
clustercount = skin.GetClusterCount()
for clusterid in range(clustercount):
cluster = skin.GetCluster(clusterid)
linkedNode = cluster.GetLink()
boneid = skeleton.boneid(linkedNode.GetName())
if boneid < 0:
raise Exception('bone {} not found in skeleton'.format(linkedNode.GetName()))
vertcount = cluster.GetControlPointIndicesCount()
for k in range(vertcount):
vertindex = cluster.GetControlPointIndices()[k]
index = counts[vertindex]
indices[vertindex, index] = boneid
weights[vertindex, index] = cluster.GetControlPointWeights()[k]
counts[vertindex] += 1
ind = np.argsort(weights)[:,-4:]
normalizeweights = np.zeros([lControlPointsCount, 4])
normalizeindices = np.zeros([lControlPointsCount, 4], dtype=np.int32)
for i in range(lControlPointsCount):
normalizeweights[i,:] = weights[i,ind[i]]
normalizeweights[i, :] /= np.sum(normalizeweights[i, :])
normalizeindices[i, :] = indices[i, ind[i]]
return normalizeindices, normalizeweights
def read_animations(pScene, skeleton):
animations = {}
time = fbx.FbxTime()
lRootNode = pScene.GetRootNode()
mapping = {bone.name:lRootNode.FindChild(bone.name,True,True) for bone in skeleton.bones }
for i in range(pScene.GetSrcObjectCount(fbx.FbxCriteria.ObjectType(fbx.FbxAnimStack.ClassId))):
lAnimStack = pScene.GetSrcObject(fbx.FbxCriteria.ObjectType(fbx.FbxAnimStack.ClassId), i)
pScene.SetCurrentAnimationStack(lAnimStack)
start = lAnimStack.LocalStart.Get()
stop = lAnimStack.LocalStop.Get()
name = lAnimStack.GetName()
animlen = stop.GetFrameCount() - start.GetFrameCount() + 1
bonelen = len(skeleton.bones)
animation = np.repeat(skeleton.initialpose[np.newaxis,...], animlen, axis=0)
for frame in range(start.GetFrameCount(), stop.GetFrameCount() + 1):
animframe = frame - start.GetFrameCount()
time.SetFrame(frame)
for boneid in range(bonelen):
bone = skeleton.bones[boneid]
if bone.name in mapping and mapping[bone.name] is not None:
localMatrix = mapping[bone.name].EvaluateGlobalTransform(time)
for i in range(4):
for j in range(4):
animation[animframe, boneid, i, j] = localMatrix.Get(i, j)
animations[name] = animation
return animations
class FbxReader(object):
def __init__(self, path):
lSdkManager, lScene = fb.InitializeSdkObjects()
status = fb.LoadScene(lSdkManager, lScene, path)
if not status:
raise Exception('error in fbx file')
self._scene = lScene
self._mesh = find_mesh_node(self._scene)
self._vertices = None
self._indices = None
self._skinning = None
self._skeleton = None
self._animations = None
def vertices_and_indices(self):
if self._mesh:
if self._vertices is None:
self._vertices = read_vertices_buffer(self._mesh)
if self._indices is None:
self._indices = read_index_buffer(self._mesh)
return self._vertices, self._indices
raise Exception('no mesh')
def skeleton(self):
if self._skeleton is None:
self._skeleton = read_skeleton(self._scene)
if self._mesh:
read_bindpose(self._mesh, self._skeleton)
return self._skeleton
def skinning_indices_weights(self):
if self._mesh:
if self._skinning is None:
self._skinning = read_skinning(self._mesh, self.skeleton())
return self._skinning
raise Exception('no mesh')
def animation_dictionary(self, skeleton=None):
if self._animations is None:
if skeleton is None:
skeleton = self.skeleton()
self._animations = read_animations(self._scene, skeleton)
return self._animations
```
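For reference, a minimal usage sketch of the reader above; the FBX path is hypothetical, and the FBX SDK path hard-coded at the top of the module must point to a valid installation:

```python
from animation_framework.fbxreader import FbxReader

reader = FbxReader(r'D:\data\character.fbx')                    # hypothetical path
vertices, indices = reader.vertices_and_indices()               # interleaved pos/normal buffer + triangle indices
skeleton = reader.skeleton()                                    # Skeleton with bindpose taken from the skin clusters
skin_indices, skin_weights = reader.skinning_indices_weights()  # top 4 influences per vertex, normalized
clips = reader.animation_dictionary(skeleton)                   # {take name: (frames, bones, 4, 4) matrices}
```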
#### File: npk/animation_framework/__init__.py
```python
from pathlib import Path
import pickle
from .viewer import viewer
from . import inertialize
from . import fbxreader
from . import modifier
from . import modifier_displacement
from . import posquat
from . import skeleton
from . import utilities
resource_dir = Path(__file__).parent.resolve() / 'resources'
# global so we don't reload the character all the time
g_vertices, g_indices, g_skinningindices, g_skinningweights, g_skeleton = None, None, None, None, None
def get_character_constructor_parameters():
global g_vertices, g_indices, g_skinningindices, g_skinningweights, g_skeleton
# LOAD CHARACTER
    if g_skeleton is not None:
return g_vertices, g_indices, g_skinningindices, g_skinningweights, g_skeleton
try:
g_vertices, g_indices, g_skinningindices, g_skinningweights, g_skeleton = pickle.load(
open(str(resource_dir / 'simplified_man_average.dump'), 'rb'))
except Exception:
reader = fbxreader.FbxReader(str(resource_dir / 'simplified_man_average.fbx'))
x = pickle.dumps(
(*reader.vertices_and_indices(),
*reader.skinning_indices_weights(),
reader.skeleton())
)
with open(str(resource_dir / 'simplified_man_average.dump'), 'wb') as f:
f.write(x)
return g_vertices, g_indices, g_skinningindices, g_skinningweights, g_skeleton
def get_skeleton() -> skeleton.Skeleton:
return get_character_constructor_parameters()[4]
def run_main_window(widgets=None, widgets_addon=None):
if widgets is None:
widgets = [viewer.CharacterWidget(True, *get_character_constructor_parameters())]
#create widget
viewer.ViewerWindow.widgets.extend(widgets)
if widgets_addon is not None:
viewer.ViewerWindow.widgets.extend(widgets_addon)
#run
viewer.mglw.run_window_config(viewer.ViewerWindow)
```
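A typical entry point using the helpers above, assuming the packaged simplified_man_average resources are present in the module's resources folder:

```python
import animation_framework as fw

skel = fw.get_skeleton()   # loads (and caches) the character data on first call
fw.run_main_window()       # opens the viewer with the default CharacterWidget
```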
#### File: npk/animation_framework/posquat.py
```python
import numpy as np
def vec_cross3(a, b):
"""Compute a cross product for a list of vectors"""
return np.concatenate([
a[..., 1:2] * b[..., 2:3] - a[..., 2:3] * b[..., 1:2],
a[..., 2:3] * b[..., 0:1] - a[..., 0:1] * b[..., 2:3],
a[..., 0:1] * b[..., 1:2] - a[..., 1:2] * b[..., 0:1],
], axis=-1)
def quat_mul(x, y, check_flip=False):
if check_flip:
y = np.where((np.sum(x * y, axis=-1) < 0)[..., np.newaxis].repeat(4, axis=-1), quat_flip(y), y)
x0, x1, x2, x3 = x[..., 0:1], x[..., 1:2], x[..., 2:3], x[..., 3:4]
y0, y1, y2, y3 = y[..., 0:1], y[..., 1:2], y[..., 2:3], y[..., 3:4]
return np.concatenate([
y0 * x0 - y1 * x1 - y2 * x2 - y3 * x3,
y0 * x1 + y1 * x0 - y2 * x3 + y3 * x2,
y0 * x2 + y1 * x3 + y2 * x0 - y3 * x1,
y0 * x3 - y1 * x2 + y2 * x1 + y3 * x0], axis=-1)
def quat_mul_vec(quaternions, vectors):
q2 = np.zeros_like(quaternions)
q2[..., 1:] = vectors
return (
(quat_mul(quat_conj(quaternions), quat_mul(q2, quaternions)))[..., 1:]
)
def quat_conj(x):
return np.array([1, -1, -1, -1], dtype=np.float32) * x
def quat_flip(x):
return np.array([-1, -1, -1, -1], dtype=np.float32) * x
def quat_slerp(x, y, a, eps=1e-10):
# WARNING CANNOT SLERP IDENTITY WITH IDENTITY
y = np.where((np.sum(x * y, axis=-1) < 0)[..., np.newaxis].repeat(4, axis=-1), quat_flip(y), y)
l = np.sum(x * y, axis=-1)
o = np.arccos(np.clip(l, -1.0, 1.0))
a0 = np.sin((1.0 - a) * o) / (np.sin(o) + eps)
a1 = np.sin((a) * o) / (np.sin(o) + eps)
return a0[..., np.newaxis] * x + a1[..., np.newaxis] * y
def quat_from_angle_axis(angle, axis):
x, y, z = axis[..., 0:1], axis[..., 1:2], axis[..., 2:3]
theta = angle/2
sintheta = np.ones_like(x[..., 0])[...,np.newaxis] * np.sin(theta)
return np.concatenate([
np.ones_like(x[..., 0])[...,np.newaxis] * np.cos(theta),
x * sintheta,
y * sintheta,
z * sintheta], axis=-1)
def quat_to_angle_axis(quats):
w, v = quats[..., 0:1], quats[..., 1:]
theta = np.arccos(w) * 2.0
return theta, vec_normalize(v, 1E-6)
def quat_average(Q, weights=None):
'''
Averaging Quaternions.
Arguments:
Q(ndarray): an Mx4 ndarray of quaternions.
        weights(list): a list of M weights, one per quaternion.
'''
# TODO does not support multidimension at this point
# Form the symmetric accumulator matrix
A = np.zeros((4, 4))
M = Q.shape[0]
wSum = 0
if weights is None:
weights = np.ones(M)
for i in range(M):
q = Q[i, :]
w_i = weights[i]
A += w_i * (np.outer(q, q)) # rank 1 update
wSum += w_i
# scale
A /= wSum
# Get the eigenvector corresponding to largest eigen value
return np.linalg.eigh(A)[1][:, -1]
def vec_normalize(x, eps=1e-08):
return x / (np.sqrt(np.sum(x * x, axis=-1, keepdims=True)) + eps)
def quat_lerp(a, b, t):
b = np.where((np.sum(b * a, axis=-1) < 0)[..., np.newaxis].repeat(4, axis=-1), quat_flip(b), b)
return vec_normalize(a * (1.0 - t) + b * t)
def quat_to_m33(x):
qw, qx, qy, qz = x[..., 0:1], x[..., 1:2], x[..., 2:3], x[..., 3:4]
x2, y2, z2 = qx + qx, qy + qy, qz + qz
xx, yy, wx = qx * x2, qy * y2, qw * x2
xy, yz, wy = qx * y2, qy * z2, qw * y2
xz, zz, wz = qx * z2, qz * z2, qw * z2
return np.concatenate([
np.concatenate([1.0 - (yy + zz), xy - wz, xz + wy], axis=-1)[..., np.newaxis, :],
np.concatenate([xy + wz, 1.0 - (xx + zz), yz - wx], axis=-1)[..., np.newaxis, :],
np.concatenate([xz - wy, yz + wx, 1.0 - (xx + yy)], axis=-1)[..., np.newaxis, :],
], axis=-2)
def m33_to_quat(ts, eps=1e-10):
qs = np.empty_like(ts[..., :1, 0].repeat(4, axis=-1))
t = ts[..., 0, 0] + ts[..., 1, 1] + ts[..., 2, 2]
s = 0.5 / np.sqrt(np.maximum(t + 1, eps))
qs = np.where((t > 0)[..., np.newaxis].repeat(4, axis=-1), np.concatenate([
(0.25 / s)[..., np.newaxis],
(s * (ts[..., 2, 1] - ts[..., 1, 2]))[..., np.newaxis],
(s * (ts[..., 0, 2] - ts[..., 2, 0]))[..., np.newaxis],
(s * (ts[..., 1, 0] - ts[..., 0, 1]))[..., np.newaxis]
], axis=-1), qs)
c0 = (ts[..., 0, 0] > ts[..., 1, 1]) & (ts[..., 0, 0] > ts[..., 2, 2])
s0 = 2.0 * np.sqrt(np.maximum(1.0 + ts[..., 0, 0] - ts[..., 1, 1] - ts[..., 2, 2], eps))
qs = np.where(((t <= 0) & c0)[..., np.newaxis].repeat(4, axis=-1), np.concatenate([
((ts[..., 2, 1] - ts[..., 1, 2]) / s0)[..., np.newaxis],
(s0 * 0.25)[..., np.newaxis],
((ts[..., 0, 1] + ts[..., 1, 0]) / s0)[..., np.newaxis],
((ts[..., 0, 2] + ts[..., 2, 0]) / s0)[..., np.newaxis]
], axis=-1), qs)
c1 = (~c0) & (ts[..., 1, 1] > ts[..., 2, 2])
s1 = 2.0 * np.sqrt(np.maximum(1.0 + ts[..., 1, 1] - ts[..., 0, 0] - ts[..., 2, 2], eps))
qs = np.where(((t <= 0) & c1)[..., np.newaxis].repeat(4, axis=-1), np.concatenate([
((ts[..., 0, 2] - ts[..., 2, 0]) / s1)[..., np.newaxis],
((ts[..., 0, 1] + ts[..., 1, 0]) / s1)[..., np.newaxis],
(s1 * 0.25)[..., np.newaxis],
((ts[..., 1, 2] + ts[..., 2, 1]) / s1)[..., np.newaxis]
], axis=-1), qs)
c2 = (~c0) & (~c1)
s2 = 2.0 * np.sqrt(np.maximum(1.0 + ts[..., 2, 2] - ts[..., 0, 0] - ts[..., 1, 1], eps))
qs = np.where(((t <= 0) & c2)[..., np.newaxis].repeat(4, axis=-1), np.concatenate([
((ts[..., 1, 0] - ts[..., 0, 1]) / s2)[..., np.newaxis],
((ts[..., 0, 2] + ts[..., 2, 0]) / s2)[..., np.newaxis],
((ts[..., 1, 2] + ts[..., 2, 1]) / s2)[..., np.newaxis],
(s2 * 0.25)[..., np.newaxis]
], axis=-1), qs)
return qs
def quat_from_lookat(aim, up):
matrices = np.zeros([3, 3]) * np.ones_like(aim[..., :1, np.newaxis].repeat(3, axis=-1))
x = vec_normalize(aim)
z = vec_normalize(vec_cross3(x, up))
y = vec_normalize(vec_cross3(z, x))
matrices[..., 0, :] = x
matrices[..., 1, :] = y
matrices[..., 2, :] = z
return m33_to_quat(matrices)
def vec3_flip(x):
return np.array([-1, -1, -1], dtype=np.float32) * x
def pose_to_pq(pose):
quaternions = m33_to_quat(pose[..., :3, :3])
positions = pose[..., 3, :3]
return positions, quaternions
def pq_to_pose(pqs=None, positions=None, quaternions=None):
if pqs is not None:
positions, quaternions = pqs
matrices = np.eye(4, dtype=np.float32) * np.ones_like(positions[..., :1, np.newaxis].repeat(4, axis=-1))
matrices[..., :3, :3] = quat_to_m33(quaternions)
matrices[..., 3, :3] = positions
return matrices
def inv(pqs=None, positions=None, quaternions=None):
if pqs is not None:
positions, quaternions = pqs
qs = quat_conj(quaternions)
ps = vec3_flip(positions)
return quat_mul_vec(qs, ps), qs
def mult(a, b):
positions = quat_mul_vec(b[1], a[0])
positions += b[0]
quaternions = quat_mul(a[1], b[1])
return positions, quaternions
def sub(a, b):
positions = a[0] - b[0]
quaternions = quat_mul(quat_conj(b[1]), a[1])
return positions, quaternions
def add(a, b):
positions = a[0] + b[0]
quaternions = quat_mul(a[1], b[1])
return positions, quaternions
def lerp(a, b, t):
positions = a[0] * (1.0 - t) + b[0] * t
quaternions = quat_lerp(a[1], b[1], t)
return positions, quaternions
def transform_point(pqs, positions):
pos = quat_mul_vec(pqs[1], positions)
return pos + pqs[0]
def transform_vector(pqs, vector):
return quat_mul_vec(pqs[1], vector)
def __take_one_pq(pqs, index, as_array=True):
positions, quaternions = pqs
if as_array:
return positions[index][np.newaxis, ...], quaternions[index][np.newaxis, ...]
return positions[index], quaternions[index]
def _tests_():
a = np.array([
[5.05513623e-04, 9.98390697e-01, 5.67055689e-02, 0.00000000e+00],
[4.59858199e-02, -5.66687993e-02, 9.97333361e-01, 0.00000000e+00],
[9.98941905e-01, 2.10348673e-03, -4.59404671e-02, 0.00000000e+00],
[2.34800407e+01, 1.03402939e+02, -2.17692762e+01, 1.00000000e+00]])
b = np.array([
[9.71705084e-01, -2.32879069e-01, -3.94458240e-02, 0.00000000e+00],
[-2.06373974e-02, 8.26563821e-02, -9.96364162e-01, 0.00000000e+00],
[2.35292892e-01, 9.68986527e-01, 7.55116358e-02, 0.00000000e+00],
[2.81058541e+01, 1.51051439e+02, -2.08025977e+01, 1.00000000e+00]])
a_b = np.concatenate([a[np.newaxis, ...], b[np.newaxis, ...]], axis=0)
# test conversion back and forth
pq = pose_to_pq(a_b)
na_b = pq_to_pose(pq)
assert (np.allclose(a_b, na_b, rtol=1e-04, atol=1e-06))
# test inverse
inv_a = np.linalg.inv(a)
inv_pq = inv(pq)
inv_a_b = pq_to_pose(inv_pq)
assert (np.allclose(inv_a, inv_a_b[0, ...], rtol=1e-04, atol=1e-06))
# dot product
a_dot_b = np.dot(a, b)
p_a_dot_b = mult(__take_one_pq(pq, 0), __take_one_pq(pq, 1))
n_a_dot_b = pq_to_pose(p_a_dot_b)[0, ...]
assert (np.allclose(a_dot_b, n_a_dot_b, rtol=1e-04, atol=1e-06))
# more dimensions
pq = pose_to_pq(a_b[np.newaxis, ...])
identities = pq_to_pose(mult(pq, inv(pq)))
assert (np.allclose(identities, np.eye(4, dtype=np.float32) * np.ones([1, 2, 1, 1])))
```
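A small sanity-check sketch in the spirit of _tests_ above; it only relies on the fact that rotating a vector by a quaternion and then by its conjugate restores the original, so it holds for either multiplication convention:

```python
import numpy as np
from animation_framework import posquat as pq

q = pq.quat_from_angle_axis(np.array([np.pi / 2.0]), np.array([[0.0, 0.0, 1.0]]))
v = np.array([[1.0, 0.0, 0.0]])

rotated = pq.quat_mul_vec(q, v)                        # rotate the vector by the quaternion
restored = pq.quat_mul_vec(pq.quat_conj(q), rotated)   # rotate back with the conjugate
assert np.allclose(restored, v, atol=1e-6)

# position/quaternion <-> 4x4 matrix round trip
pose = pq.pq_to_pose(positions=v, quaternions=q)
assert np.allclose(pq.pq_to_pose(pq.pose_to_pq(pose)), pose, atol=1e-6)
```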
#### File: animation_framework/viewer/axisrender.py
```python
import moderngl
import numpy as np
class AxisRender(object):
def __init__(self, ctx, scale=10):
self.ctx = ctx
self.program = self.ctx.program(
vertex_shader='''
#version 430
uniform mat4 Mvp;
in vec3 in_vert;
in vec3 in_color;
out vec3 v_color;
void main() {
gl_Position = Mvp * vec4(in_vert, 1.0);
v_color = in_color;
}
''',
fragment_shader='''
#version 430
in vec3 v_color;
out vec4 f_color;
void main() {
f_color = vec4(v_color, 1.0);
}
''',
)
vertices = np.array([
# x, y ,z red, green, blue
0, 0, 0, 1, 0, 0,
scale, 0, 0, 1, 0, 0,
0, 0, 0, 0, 1, 0,
0, scale, 0, 0, 1, 0,
0, 0, 0, 0, 0, 1,
0, 0, scale, 0, 0, 1,
], dtype='f4')
vbo = self.ctx.buffer(vertices)
self.vao = self.ctx.vertex_array(
self.program,
[
(vbo, '3f4 3f4', 'in_vert', 'in_color')
],
)
def render(self, mvp, globalBoneMatrices=None):
if globalBoneMatrices is None:
self.program['Mvp'].write(mvp)
self.vao.render(moderngl.LINES)
else:
for b in globalBoneMatrices:
self.program['Mvp'].write((mvp * b).astype('f4'))
self.vao.render(moderngl.LINES)
```
#### File: animation/npk/animations.py
```python
from pathlib import Path
import pickle
import numpy as np
from animation_framework import modifier
from animation_framework import posquat as pq
from animation_framework import animation as AN
import animation_framework as fw
from animation_framework.utilities import compute_bone_speed, is_foot_static
from animation_framework import skeleton as sk
from animation_framework import modifier_displacement as disp
from animation_framework import fbxreader
import footphase_extraction as FPE
resource_dir = Path(__file__).parent.resolve() / 'resources'
def convert_fbx_animation(name, need_rotation=False):
skel = fw.get_skeleton()
reader = fbxreader.FbxReader(str(resource_dir / '{}.fbx'.format(name)))
animation = reader.animation_dictionary(skel)['Take 001']
if need_rotation:
animation = np.dot(animation, np.array([[1, 0, 0, 0], [0, 0, -1, 0], [0, 1, 0, 0], [0, 0, 0, 1]]))
disp.update_matrix_anim_projecting_disp_on_ground(animation)
x = pickle.dumps(animation)
with open(str(resource_dir / '{}.dump'.format(name)), 'wb') as f:
f.write(x)
def get_raw_animation(name, with_foot_phase=False):
anm = pq.pose_to_pq(pickle.load(open(str(resource_dir / '{}.dump'.format(name)), 'rb')))
animation = AN.Animation(anm, name=name)
if with_foot_phase:
        # TODO: use the new attribute method to add this info
        raise NotImplementedError()
return animation
def get_raw_db_animations(with_foot_phase=False):
skel = fw.get_skeleton()
animations = []
animation = get_raw_animation('on_spot', with_foot_phase=with_foot_phase)
animation.pq = modifier.lock_feet(skel, animation.pq, 5, 10)
ranges = [[33, 130], [465, 528], [558, 647], [790, 857], [892, 961], [1120, 1190], [1465, 1528]]
animations += [animation[r[0]: r[1]] for r in ranges]
animation = get_raw_animation('side_steps', with_foot_phase=with_foot_phase)
animation.pq = modifier.lock_feet(skel, animation.pq, 5, 10)
ranges = [[185,256], [256,374], [374,463], [463,550], [550,636], [636,735],
[735,816], [816,900], [900,990], [990,1080], [1080,1165], [1165,1260]]
animations += [animation[r[0]-185: r[1]-185] for r in ranges]
animation = get_raw_animation('turn_steps', with_foot_phase=with_foot_phase)
animation.pq = modifier.lock_feet(skel, animation.pq, 10, 5)
ranges = [[184, 280], [280, 378], [375, 498], [490, 576], [576, 704], [704, 811], [811, 924], [920, 1026]]
animations += [animation[r[0]-184: r[1]-184] for r in ranges]
for anim in animations:
anim.pq = disp.reset_displacement_origin(skel, anim.pq)
return animations
def generate_augmentation(skel:sk.Skeleton, animations):
# 7 first are on spot
# 12 next are side steps
# make small steps
'''
animcount = 12
for i in range(7, 7+animcount):
print('generate pass {} / {}'.format(i, animcount))
animations += [displacement.scale_displacement(skel, copy.deepcopy(animations[i]), 0.6, 0.6)]
animations = [anim for anim in animations if is_animation_valid(skel, anim)]
'''
animcount = len(animations)
for i in range(animcount):
print('generate mirrors {} / {}'.format(i, animcount))
animations += [modifier.mirror_animation(animations[i])]
'''
animcount = len(animations)
rots = [pq.quat_from_angle_axis(np.array([0 * 3.1415 / 180]), np.array([[0, 0, 1]]))]
rots += [pq.quat_from_angle_axis(np.array([25 * 3.1415 / 180]), np.array([[0, 0, 1]]))]
rots += [pq.quat_from_angle_axis(np.array([-25 * 3.1415 / 180]), np.array([[0, 0, 1]]))]
movs = [np.array([15, 0, 0])]
movs += [np.array([0, 0, 15])]
movs += [np.array([0, 0, -15])]
for i in range(animcount):
print('generate pass {} / {}'.format(i, animcount))
for rot in rots:
for mov in movs:
animations += [displacement.offset_displacement_at_end(skel, copy.deepcopy(animations[i]), mov, rot)]
'''
animcount = len(animations)
print('generate blends {}'.format(animcount * animcount))
for i in range(animcount):
for j in range(animcount):
if i != j:
try:
a = modifier.blend_anim_foot_phase(skel, animations[i], animations[j], 0.5)
animations.append(a)
except Exception:
pass
'''
animations = [anim for anim in animations if is_animation_valid(skel, anim)]
animcount = len(animations)
for i in range(animcount):
print('generate pass {} / {}'.format(i, animcount))
animations += [displacement.scale_displacement(skel, copy.deepcopy(animations[i]), 0.6, 0.8)]
animations = [anim for anim in animations if is_animation_valid(skel, anim)]
'''
print('generate {} animations'.format(len(animations)))
return animations
def save_animation_database(animations):
x = pickle.dumps(animations)
with open(str(resource_dir / 'animation_database.dump'), 'wb') as f:
f.write(x)
def load_animation_database():
return pickle.load(open(str(resource_dir / 'animation_database.dump'), 'rb'))
```
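The module above is normally driven by a one-off preprocessing step. The sketch below shows the assumed order of operations (FBX conversion, raw clip extraction, augmentation, save); whether need_rotation must be passed to convert_fbx_animation depends on how each FBX was exported, so treat the calls as illustrative:

```python
import animation_framework as fw
import animations as IN

for name in ('on_spot', 'side_steps', 'turn_steps'):
    IN.convert_fbx_animation(name)               # writes resources/<name>.dump

clips = IN.get_raw_db_animations()
clips = IN.generate_augmentation(fw.get_skeleton(), clips)
IN.save_animation_database(clips)
```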
#### File: animation/npk/footphase_extraction.py
```python
import numpy as np
import sinusoidal_fit
def get_foot_phase_sinusoidal(phase, window_size=20, normalized=False):
anim_len = len(phase)
window_size = min(window_size, int(anim_len / 4))
phase_normal = phase
if normalized:
phase_normal = np.zeros(anim_len)
for t in range(anim_len):
s = max(0, t - window_size)
e = min(anim_len, t + window_size)
mean = np.mean(phase[s:e])
std = np.std(phase[s:e])
phase_normal[t] = (phase[t] - mean) / (std + 1e-8)
fitted = np.zeros((anim_len, 5), dtype=np.float32)
fitted[:, 0] = phase_normal
last_solution = None
for t in range(0, anim_len):
s = max(0, t - window_size)
e = min(anim_len, t + window_size)
lnspace = np.array(range(s, e), dtype=np.float32) / 30.0
data = phase_normal[s:e]
solution, fitness = sinusoidal_fit.fast_fit(lnspace, data, original_fit=last_solution)
#solution, fitness = sinusoidal_fit.fit(lnspace, data)
last_solution = solution
print(fitness, float(t)/anim_len)
#a, f, s, b = solution
fitted[t, 1:] = solution
#fitted[t] = a * np.sin(f * t - s) + b
#fitted[t] = (f * t - s) % (np.pi*2)
return fitted
```
#### File: animation/npk/motionmatching.py
```python
import numpy as np
import animation_framework as fw
from animation_framework import modifier
from animation_framework import posquat as pq
from animation_framework import animation as AN
from animation_framework import utilities as tr
from animation_framework import modifier_displacement as disp
from animation_framework import skeleton as skl
INITIAL_SCORE = 1
FEATURE_SCORE = 1
TIMING_SCORE = 20
class motionDB(object):
def __init__(self, stride):
self.stride = stride
self.clips = []
def build_motion_db(animations, skeleton: skl.Skeleton, stride=10):
db = motionDB(stride)
def _build_motion(animation):
animation_len = len(animation)
last_clip = None
for r in range(0, animation_len-stride-2, stride):
anim = animation[r:]
anim.pq = disp.reset_displacement_origin(skeleton, anim.pq)
anim_len = len(anim)
features_keys = list(range(0, anim_len-stride, stride))
hipspos = anim.pq[0][:, skeleton.hipsid, :]
lfpos = anim.pq[0][:, skeleton.leftfootid, :]
rfpos = anim.pq[0][:, skeleton.rightfootid, :]
hipsvec = tr.compute_vector(hipspos)
lfvec = tr.compute_vector(lfpos)
rfvec = tr.compute_vector(rfpos)
frames = anim.pq[0][:stride+1], anim.pq[1][:stride+1]
features = np.zeros((len(features_keys), 6, 3))
features[:, 0, :] = hipspos[features_keys, :]
features[:, 1, :] = lfpos[features_keys, :]
features[:, 2, :] = rfpos[features_keys, :]
features[:, 3, :] = hipsvec[features_keys, :]
features[:, 4, :] = lfvec[features_keys, :]
features[:, 5, :] = rfvec[features_keys, :]
#create the animation of this clip
clip = AN.Animation(frames, anim.name)
clip.attributes.append(AN.Attribute(features, 'mm_features', False))
if last_clip is not None:
last_clip.attributes.append(AN.Attribute([clip], 'mm_next', False))
last_clip = clip
db.clips.append(clip)
for animation in animations:
_build_motion(animation)
return db
def compute_score(feature, query):
vec = feature - query
dist = np.sum(vec * vec, axis=1) * np.array([2, 1, 1, 4, 6, 6])
return np.sum(dist)
def find_segment(a, b, t, db, skeleton, debug_dict=None):
# create a temp animation with a end b
pos, quat = np.zeros((4, len(skeleton.bindpose), 3)), np.zeros((4, len(skeleton.bindpose), 4))
pos[:2, :, :], quat[:2, :, :] = a[0][-2:, :, :], a[1][-2:, :, :]
pos[2:, :, :], quat[2:, :, :] = b[0][:2, :, :], b[1][:2, :, :]
# set in local space
pos, quat = disp.reset_displacement_origin(skeleton, (pos, quat))
hipspos = pos[:, skeleton.hipsid, :]
lfpos = pos[:, skeleton.leftfootid, :]
rfpos = pos[:, skeleton.rightfootid, :]
hipsvec = tr.compute_vector(hipspos)
lfvec = tr.compute_vector(lfpos)
rfvec = tr.compute_vector(rfpos)
    if debug_dict is not None:
debug_dict['segment_a_{}'.format(t)] = a
debug_dict['segment_b_{}'.format(t)] = b
debug_dict['local_{}'.format(t)] = (pos, quat)
a_query = np.zeros((6, 3))
a_query[0, :] = hipspos[1, :]
a_query[1, :] = lfpos[1, :]
a_query[2, :] = rfpos[1, :]
a_query[3, :] = hipsvec[0, :]
a_query[4, :] = lfvec[0, :]
a_query[5, :] = rfvec[0, :]
b_query = np.zeros((6, 3))
b_query[0, :] = hipspos[-2, :]
b_query[1, :] = lfpos[-2, :]
b_query[2, :] = rfpos[-2, :]
b_query[3, :] = hipsvec[-1, :]
b_query[4, :] = lfvec[-1, :]
b_query[5, :] = rfvec[-1, :]
# check all the clips for matching features
features_index = int(t / db.stride)
best_score = 1e8
best_clip = None
best_feature = -1
for clip in db.clips:
features = clip.attribute('mm_features').data
features_count = len(features)
# initial state score
init_score = compute_score(features[0], a_query)
# features scores
for f in range(1, features_count):
feature_distance_score = np.abs(f - features_index)
feature_score = compute_score(features[f], b_query)
score = init_score * INITIAL_SCORE + \
feature_distance_score * TIMING_SCORE + \
feature_score * FEATURE_SCORE
if best_score > score:
best_score = score
best_clip = clip
best_feature = f
    if debug_dict is not None:
debug_dict['frames_{}'.format(t)] = best_clip
return best_clip, best_score
def create_motion_transition(db, skeleton, anim_a, anim_b, transition_time, debug_dict=None):
'''
Create a transition using a pure motionmatching implementation that goes from stride to stride
:param db:
:param skeleton:
:param anim_a:
:param anim_b:
:param transition_time:
:param debug_dict:
:return:
'''
# combine transition
p, q = np.zeros((transition_time*2, len(skeleton.bones), 3)), np.zeros((transition_time*2, len(skeleton.bones), 4))
transition_len = 0
lastp, lastq = anim_a.pq[0][-2:, :, :], anim_a.pq[1][-2:, :, :]
endp, endq = anim_b.pq[0][:2, :, :], anim_b.pq[1][:2, :, :]
t = transition_time
last_segment = None
best_score = 1e8
while t >= db.stride:
segment, score = find_segment((lastp, lastq), (endp, endq), t, db, skeleton, debug_dict)
if last_segment is None or score < best_score:
segment.pq = disp.set_displacement_origin(skeleton, segment.pq, (lastp[-1, 0, :], lastq[-1, 0, :]))
last_segment = segment
best_score = score
p[transition_len: transition_len+db.stride, :, :] = last_segment.pq[0][:db.stride]
q[transition_len: transition_len + db.stride, :, :] = last_segment.pq[1][:db.stride]
transition_len += db.stride
t -= db.stride
lastp, lastq = last_segment.pq[0][-2:, :, :], last_segment.pq[1][-2:, :, :]
next_clip = last_segment.attribute('mm_next')
if next_clip is None:
last_segment = None
best_score = 1e8
else:
last_segment = next_clip.data[0]
return AN.Animation((p[:transition_len:, :, :], q[:transition_len:, :, :]), name='transition')
def create_segment_transition(db, skeleton, anim_a, anim_b, transition_time, debug_dict=None):
'''
pick the best segment in the motion database and stick to it for the entire animation length
:param db:
:param skeleton:
:param anim_a:
:param anim_b:
:param transition_time:
:param debug_dict:
:return:
'''
# combine transition
p, q = np.zeros((transition_time * 2, len(skeleton.bones), 3)), np.zeros(
(transition_time * 2, len(skeleton.bones), 4))
transition_len = 0
lastp, lastq = anim_a.pq[0][-2:, :, :], anim_a.pq[1][-2:, :, :]
endp, endq = anim_b.pq[0][:2, :, :], anim_b.pq[1][:2, :, :]
segment, score = find_segment((lastp, lastq), (endp, endq), transition_time, db, skeleton, debug_dict)
while True:
segment.pq = disp.set_displacement_origin(skeleton, segment.pq, (lastp[-1, 0, :], lastq[-1, 0, :]))
p[transition_len: transition_len + db.stride, :, :] = segment.pq[0][:db.stride]
q[transition_len: transition_len + db.stride, :, :] = segment.pq[1][:db.stride]
lastp, lastq = segment.pq[0][-2:, :, :], segment.pq[1][-2:, :, :]
next_clip = segment.attribute('mm_next')
if next_clip is None:
break
segment = next_clip.data[0]
transition_len += db.stride
return AN.Animation((p[:transition_len:, :, :], q[:transition_len:, :, :]), name='transition')
```
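A hypothetical wiring of the pieces above: build the database from the previously saved clips and request a transition between two of them (assumes the animation database produced by animations.py exists and holds at least two clips):

```python
import animation_framework as fw
import animations as IN
import motionmatching as mm

skel = fw.get_skeleton()
clips = IN.load_animation_database()

db = mm.build_motion_db(clips, skel, stride=10)
transition = mm.create_segment_transition(db, skel, clips[0], clips[1], transition_time=40)
print(transition.name, len(transition.pq[0]))    # 'transition' and the number of generated frames
```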
#### File: animation/npk/sinusoidal_fit.py
```python
import numpy as np
from scipy import optimize
def fit(X,
y,
population_count=100,
elite_count=2,
velocity_rate=0.001,
epoch_count=25):
params_count = 4
lower_limits = np.array([0, 0, -np.pi, -1])
upper_limits = np.array([1, np.pi * 2, np.pi, 1])
bounds = np.array([(l, u) for l, u in zip(lower_limits, upper_limits)])
def function(afsb, t):
return afsb[..., 0:1] * np.sin(afsb[..., 1:2] * t - afsb[..., 2:3]) + afsb[..., 3:4]
def error(params, X, y):
y_ = function(params, X)
return np.sqrt(np.sum((y - y_) ** 2, axis=-1) / X.shape[-1])
def extinctions(fitness):
return (swarm_fitness + np.min(swarm_fitness) * (
((params_count - 1.0) / (population_count - 1.0)) - 1.0)) / np.max(
swarm_fitness)
# initial population
swarm_positions = np.random.uniform(lower_limits, upper_limits, (population_count, params_count))
swarm_velocities = np.random.uniform(-0.1, 0.1, population_count * params_count).reshape(
(population_count, params_count))
swarm_fitness = error(swarm_positions, X[np.newaxis, :], y)
swarm_extinction = extinctions(swarm_fitness)
swarm_sorted_args = np.argsort(swarm_fitness, axis=0)
# global best
solution = swarm_positions[swarm_sorted_args[0], ...]
best_fitness = swarm_fitness[swarm_sorted_args[0]]
# iterate
for epoch in range(epoch_count):
# early exit if close enough
if best_fitness < 1e-6:
break
# pick elites and do a gradient descent using l-bfgs-b algorithm
for e in range(elite_count):
x, _, _ = optimize.fmin_l_bfgs_b(
func=error,
x0=swarm_positions[swarm_sorted_args[e], ...],
args=(X[np.newaxis, :], y),
approx_grad=True,
bounds=bounds,
maxiter=100)
swarm_velocities[swarm_sorted_args[e], ...] = np.random.uniform() * \
swarm_velocities[swarm_sorted_args[e], ...] + x - \
swarm_positions[swarm_sorted_args[e], ...]
swarm_positions[swarm_sorted_args[e], ...] = x
# create the offsprings
offspring_positions = np.zeros((population_count, params_count), dtype=np.float32)
offspring_velocities = np.zeros((population_count, params_count), dtype=np.float32)
offspring_fitness = np.zeros(population_count, dtype=np.float32)
# populate offsprings
for off in range(population_count):
parents_count = len(swarm_sorted_args)
# rank based selection
probabilities = np.array([parents_count - i for i in range(parents_count)], dtype=np.float32)
probabilities /= np.sum(probabilities)
a, b, prot = np.random.choice(swarm_sorted_args, 3, p=probabilities, replace=False)
# combine parents
mix_values = np.random.uniform(size=params_count)
offspring_positions[off, :] = swarm_positions[a, :] * mix_values + \
swarm_positions[b, :] * (1.0 - mix_values)
# add a bit of the velocity from the parents
offspring_positions[off, :] += velocity_rate * (swarm_velocities[a, :] + swarm_velocities[b, :])
# use the velocities from the parents
offspring_velocities[off, :] = np.random.uniform(size=params_count) * swarm_velocities[a, :] + \
np.random.uniform(size=params_count) * swarm_velocities[b, :]
# mutate
p = (np.mean(swarm_extinction[[a, b]]) * (params_count - 1.0) + 1.0) / params_count
if p < np.random.uniform():
swarm_min = np.min(swarm_positions, axis=0)
swarm_max = np.max(swarm_positions, axis=0)
x = np.random.uniform(-1, 1, size=params_count) * np.mean(swarm_extinction[[a, b]]) * (
swarm_max - swarm_min)
offspring_velocities[off, :] += x
offspring_positions[off, :] += x
# adoption
mix_values = np.random.uniform(size=params_count)
average_parents = np.mean(swarm_positions[[a, b], :], axis=0)
x = mix_values * (average_parents - offspring_positions[off, :])
mix_values = np.random.uniform(size=params_count)
x += mix_values * (offspring_positions[prot, :] - offspring_positions[off, :])
offspring_velocities[off, :] += x
offspring_positions[off, :] += x
# clip
offspring_positions[off, :] = np.clip(offspring_positions[off, :], a_min=lower_limits, a_max=upper_limits)
# compute fitness of this offspring
offspring_fitness[off] = error(offspring_positions[off, :], X, y)
# assign offsprings to population
swarm_positions = offspring_positions
swarm_velocities = offspring_velocities
swarm_fitness = offspring_fitness
# sort everyone
swarm_sorted_args = np.argsort(swarm_fitness, axis=0)
swarm_extinction = extinctions(swarm_fitness)
# try update solution
if swarm_fitness[swarm_sorted_args[0]] < best_fitness:
best_fitness = swarm_fitness[swarm_sorted_args[0]]
solution = swarm_positions[swarm_sorted_args[0], ...]
return solution, best_fitness
def fast_fit(X,
y,
population_count=200,
epoch_count=400,
original_fit=None):
weights = np.ones_like(X)
#weights[:len(X)-2] = np.linspace(0.2, 1.0, len(X) - 2)
#weights[len(X) - 2:] = np.linspace(1.0, 0.2, len(X) - 2)
def function(afsb, t):
return afsb[..., 0:1] * np.sin(afsb[..., 1:2] * t - afsb[..., 2:3]) + afsb[..., 3:4]
def error(params, X, y):
y_ = function(params, X)
return np.sqrt(np.sum(((y - y_) ** 2) * weights, axis=-1) / X.shape[-1])
params_count = 4
lower_limits = np.array([0, 0, -np.pi, -.5])
upper_limits = np.array([1, np.pi * 2, np.pi, .5])
bounds = np.array([(l, u) for l, u in zip(lower_limits, upper_limits)])
lower_limits = lower_limits[np.newaxis, :] * np.ones((population_count, 1))
upper_limits = upper_limits[np.newaxis, :] * np.ones((population_count, 1))
steps_size = (upper_limits - lower_limits) * 0.1
population = np.random.uniform(lower_limits, upper_limits, (population_count, params_count))
if original_fit is not None:
population = original_fit[np.newaxis, :] * np.ones((population_count, 1))
population = np.random.normal(population, steps_size)
fitness = error(population, X[np.newaxis, :], y)
for epoch in range(epoch_count):
new_population = np.random.normal(population, steps_size)
new_population = np.clip(new_population, a_min=lower_limits, a_max=upper_limits)
new_fitness = error(new_population, X[np.newaxis, :], y)
is_better = new_fitness < fitness
population[is_better] = new_population[is_better]
fitness[is_better] = new_fitness[is_better]
steps_size *= 0.999
sorted_args = np.argsort(fitness, axis=0)
x, f, _ = optimize.fmin_l_bfgs_b(
func=error,
x0=population[sorted_args[0], :],
args=(X[np.newaxis, :], y),
approx_grad=True,
bounds=bounds)
return x, f
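# Illustrative usage sketch (hypothetical data, not part of the original file):
# recover [amplitude, frequency, phase, bias] of a noisy sine signal.
#   t = np.linspace(0.0, 2.0 * np.pi, 64)
#   y = 0.8 * np.sin(1.5 * t - 0.3) + 0.1
#   params, fit_error = fast_fit(t, y)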
```
#### File: animation/npk/test.py
```python
import pickle
import numpy as np
import animations as IN
from animation_framework import modifier_displacement as disp
from animation_framework import posquat as pq
from animation_framework import skeleton as sk
import transition_type_b as trn
def create_transition_animation(resource_dir, skel:sk.Skeleton):
anim_db = IN.load_animation_database()
mapping_db = pickle.load(open(str(resource_dir / 'mapping.dump'), 'rb'))
idle = pq.pose_to_pq(pickle.load(open(str(resource_dir / 'idle.dump'), 'rb')))
idle = disp.reset_displacement_origin(skel, idle)
relax = pq.pose_to_pq(pickle.load(open(str(resource_dir / 'idle_relax.dump'), 'rb')))
relax = disp.reset_displacement_origin(skel, relax)
alert = pq.pose_to_pq(pickle.load(open(str(resource_dir / 'idle_alerted.dump'), 'rb')))
alert = disp.reset_displacement_origin(skel, alert)
rootup = pq.quat_from_angle_axis(np.array([90 * 3.1415 / 180]), np.array([[1, 0, 0]]))
is_transition = np.ones(6000)
a = trn.create_transition(
skel,
anim_db,
mapping_db,
(idle[0][260:300, ...], idle[1][260:300, ...]),
(idle[0][1440:1500, ...], idle[1][1440:1500, ...])
)
is_transition[:40] = 0
is_transition[len(a[0]) - 60:len(a[0])] = 0
a = trn.create_transition(
skel,
anim_db,
mapping_db,
a,
(alert[0][0:40, ...], alert[1][0:40, ...])
)
is_transition[len(a[0]) - 40:len(a[0])] = 0
a = trn.create_transition(
skel,
anim_db,
mapping_db,
a,
disp.set_displacement_origin(
skel,
(idle[0][500:550, ...], idle[1][500:550, ...]),
(
np.array([0, 0, 0]),
pq.quat_mul(rootup, pq.quat_from_angle_axis(np.array([-120 * 3.1415 / 180]), np.array([[0, 1, 0]])))
)
)
)
is_transition[len(a[0]) - 50:len(a[0])] = 0
a = trn.create_transition(
skel,
anim_db,
mapping_db,
a,
disp.set_displacement_origin(
skel,
(relax[0][00:50, ...], relax[1][00:50, ...]),
(
np.array([-5, 0, 0]),
pq.quat_mul(rootup, pq.quat_from_angle_axis(np.array([-130 * 3.1415 / 180]), np.array([[0, 1, 0]])))
)
)
)
is_transition[len(a[0]) - 50:len(a[0])] = 0
a = trn.create_transition(
skel,
anim_db,
mapping_db,
a,
(idle[0][250:260, ...], idle[1][250:260, ...])
)
is_transition[len(a[0]) - 10:len(a[0])] = 0
return a, is_transition[:len(a[0])]
```
#### File: python_rnd_collection/deep_learning/deep_q_learning_atari.py
```python
import numpy as np
import gym
import collections
import cv2
import os
import torch as T
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
class RepeatSkipScaleStack(gym.Wrapper):
def __init__(self, env, repeat=4, output_shape=(4, 84, 84)):
super(RepeatSkipScaleStack, self).__init__(env)
self.repeat = repeat
self.frame_buffer = np.zeros((2, *env.observation_space.low.shape), dtype=np.uint8)
self.stack = collections.deque(maxlen=output_shape[0])
self.observation_space = gym.spaces.Box(low=0.0, high=1.0, shape=output_shape, dtype=np.float32)
def _resize_observation(self, observation):
new_frame = cv2.cvtColor(observation, cv2.COLOR_BGR2GRAY)
resize_screen = cv2.resize(new_frame, (84, 84), interpolation=cv2.INTER_AREA)
new_obs = np.array(resize_screen, dtype=np.float32).reshape(self.observation_space.low.shape[1:])
new_obs /= 255.0
return new_obs
def reset(self, **kwargs):
self.stack.clear()
observation = self.env.reset()
observation = self._resize_observation(observation)
for _ in range(self.stack.maxlen):
self.stack.append(observation)
return np.array(self.stack).reshape(self.observation_space.low.shape)
def step(self, action):
t_reward = 0.0
done = False
for i in range(self.repeat):
obs, reward, done, info = self.env.step(action)
t_reward += reward
idx = i % 2
self.frame_buffer[idx] = obs
if done:
break
max_frame = np.maximum(self.frame_buffer[0], self.frame_buffer[1])
observation = self._resize_observation(max_frame)
self.stack.append(observation)
frames = np.array(self.stack).reshape(self.observation_space.low.shape)
return frames, t_reward, done, info
def make_env(env_name, shape=(4,84,84), repeat=4):
env = gym.make(env_name)
env = RepeatSkipScaleStack(env, repeat=repeat, output_shape=shape)
return env
class ReplayBuffer(object):
def __init__(self, max_size, input_shapes):
self.mem_size = max_size
self.mem_ctr = 0
self.state = np.zeros((self.mem_size, *input_shapes), dtype=np.float32)
self.new_state = np.zeros((self.mem_size, *input_shapes), dtype=np.float32)
self.action = np.zeros(self.mem_size, dtype=np.int64)
self.reward = np.zeros(self.mem_size, dtype=np.float32)
        self.terminal = np.zeros(self.mem_size, dtype=bool)
def store_transition(self, state, action, reward, state_, done):
idx = self.mem_ctr % self.mem_size
self.state[idx] = state
self.new_state[idx] = state_
self.action[idx] = action
self.reward[idx] = reward
self.terminal[idx] = done
self.mem_ctr += 1
def sample_buffer(self, batch_size):
max_mem = min(self.mem_size, self.mem_ctr)
batch = np.random.choice(max_mem, batch_size, replace=False)
return (
self.state[batch],
self.action[batch],
self.reward[batch],
self.new_state[batch],
self.terminal[batch]
)
class DeepQNetwork(nn.Module):
def __init__(self, lr, n_actions, name, input_dims, chkpt_dir):
super(DeepQNetwork, self).__init__()
self.checkpoint_dir = chkpt_dir
self.checkpoint_file = os.path.join(self.checkpoint_dir, name+'.ckp')
self.conv1 = nn.Conv2d(input_dims[0], 32, 8, stride=4)
self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
self.conv3 = nn.Conv2d(64, 64, 3, stride=1)
fc_input_dims = self.calculate_conv_output_dims(input_dims)
self.fc1 = nn.Linear(fc_input_dims, 512)
self.fc2 = nn.Linear(512, n_actions)
self.optimizer = optim.RMSprop(self.parameters(), lr=lr)
self.loss = nn.MSELoss()
self.device = T.device('cuda:0')
self.to(self.device)
def calculate_conv_output_dims(self, input_dims):
state = T.zeros(1, *input_dims)
dims = self.conv1(state)
dims = self.conv2(dims)
dims = self.conv3(dims)
return int(np.prod(dims.size()))
def forward(self, state):
conv1 = F.relu(self.conv1(state))
conv2 = F.relu(self.conv2(conv1))
conv3 = F.relu(self.conv3(conv2))
# conv3 shape is BS * n_filters * H * W
conv_state = conv3.view(conv3.size()[0], -1)
flat1 = F.relu(self.fc1(conv_state))
actions = self.fc2(flat1)
return actions
def save_checkpoint(self):
print('... saving checkpoint ...')
T.save(self.state_dict(), self.checkpoint_file)
def load_checkpoint(self):
print('... load checkpoint ...')
self.load_state_dict(T.load(self.checkpoint_file))
class DQNAgent(object):
def __init__(self, gamma, epsilon, lr, n_actions, input_dims, mem_size, batch_size, eps_min=0.01, eps_dec=5e-7,
replace=1000, algo=None, env_name=None, chkpt_dir='tmp/dqn'):
self.gamma = gamma
self.epsilon = epsilon
self.lr = lr
self.n_actions = n_actions
self.input_dims = input_dims
self.mem_size = mem_size
self.batch_size = batch_size
self.eps_min = eps_min
self.eps_dec = eps_dec
self.replace_target = replace
self.algo = algo
self.env_name = env_name
self.chkpt_dir = chkpt_dir
self.action_space = [i for i in range(self.n_actions)]
self.learn_step_counter = 0
self.memory = ReplayBuffer(mem_size, input_dims)
self.q_eval = DeepQNetwork(self.lr, self.n_actions,
input_dims=self.input_dims,
name=self.env_name+'_'+self.algo+'_q_eval',
chkpt_dir=self.chkpt_dir)
self.q_next = DeepQNetwork(self.lr, self.n_actions,
input_dims=self.input_dims,
name=self.env_name + '_' + self.algo + '_q_next',
chkpt_dir=self.chkpt_dir)
def choose_action(self, observation):
if np.random.random() > self.epsilon:
state = T.tensor([observation], dtype=T.float).to(self.q_eval.device)
actions = self.q_eval.forward(state)
action = T.argmax(actions).item()
else:
action = np.random.choice(self.action_space)
return action
def store_transition(self, state, action, reward, state_, done):
self.memory.store_transition(state, action, reward, state_, done)
def sample_memory(self):
state, action, reward, new_state, done = self.memory.sample_buffer(self.batch_size)
state = T.tensor(state).to(self.q_eval.device)
action = T.tensor(action).to(self.q_eval.device)
reward = T.tensor(reward).to(self.q_eval.device)
new_state = T.tensor(new_state).to(self.q_eval.device)
done = T.tensor(done).to(self.q_eval.device)
return state, action, reward, new_state, done
def replace_target_network(self):
if self.learn_step_counter % self.replace_target == 0:
self.q_next.load_state_dict(self.q_eval.state_dict())
def decrement_epsilon(self):
self.epsilon = self.epsilon - self.eps_dec
self.epsilon = max(self.epsilon, self.eps_min)
def save_models(self):
self.q_eval.save_checkpoint()
self.q_next.save_checkpoint()
def load_models(self):
self.q_eval.load_checkpoint()
self.q_next.load_checkpoint()
def learn(self):
if self.memory.mem_ctr > self.batch_size:
self.q_eval.optimizer.zero_grad()
self.replace_target_network()
states, actions, rewards, states_, dones = self.sample_memory()
indices = np.arange(self.batch_size)
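            # TD target: r + gamma * max_a' Q_target(s', a'); the online network
            # supplies Q(s, a) for the actions that were actually taken.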
q_pred = self.q_eval.forward(states)[indices, actions]
q_next = self.q_next.forward(states_).max(dim=1)[0]
            q_next[dones] = 0.0  # terminal states contribute no future reward
q_target = rewards + self.gamma*q_next
loss = self.q_eval.loss(q_target, q_pred).to(self.q_eval.device)
loss.backward()
self.q_eval.optimizer.step()
self.learn_step_counter += 1
self.decrement_epsilon()
def plot_learning_curve(x, scores, epsilons, filename, lines=None):
fig=plt.figure()
ax=fig.add_subplot(111, label="1")
ax2=fig.add_subplot(111, label="2", frame_on=False)
ax.plot(x, epsilons, color="C0")
ax.set_xlabel("Training Steps", color="C0")
ax.set_ylabel("Epsilon", color="C0")
ax.tick_params(axis='x', colors="C0")
ax.tick_params(axis='y', colors="C0")
N = len(scores)
running_avg = np.empty(N)
for t in range(N):
running_avg[t] = np.mean(scores[max(0, t-20):(t+1)])
ax2.scatter(x, running_avg, color="C1")
ax2.axes.get_xaxis().set_visible(False)
ax2.yaxis.tick_right()
ax2.set_ylabel('Score', color="C1")
ax2.yaxis.set_label_position('right')
ax2.tick_params(axis='y', colors="C1")
if lines is not None:
for line in lines:
plt.axvline(x=line)
plt.savefig(filename)
if __name__ == '__main__':
np.seterr(all='raise')
env = make_env('PongNoFrameskip-v4')
best_score = -np.inf
learning_mode = True
load_checkpoint = False
n_games = 1000
agent = DQNAgent(gamma=0.99, epsilon=1.0, lr=0.0001, input_dims=env.observation_space.shape,
n_actions=env.action_space.n, mem_size=50000, eps_min=0.1, batch_size=32,
replace=1000, eps_dec=1e-5, chkpt_dir='models/', algo='DQNAgent',
env_name='PongNoFrameskip-v4')
if load_checkpoint:
agent.load_models()
fname = 'plots/{}_{}_{}_lr_{}_games.png'.format(agent.algo, agent.env_name, str(agent.lr), str(n_games))
n_steps = 0
scores, eps_history, steps_array = [], [], []
for i in range(n_games):
done = False
score = 0
observation = env.reset()
while not done:
action = agent.choose_action(observation)
observation_, reward, done, info = env.step(action)
score += reward
if learning_mode:
agent.store_transition(observation, action, reward, observation_, int(done))
agent.learn()
observation = observation_
n_steps += 1
scores.append(score)
steps_array.append(n_steps)
avg_score = np.mean(scores[-100:])
print('episode', i, 'score', score,
'average score {} best score {} epsilon {}'.format(avg_score, best_score, agent.epsilon),
'steps', n_steps)
if avg_score > best_score:
if learning_mode:
agent.save_models()
best_score = avg_score
eps_history.append(agent.epsilon)
plot_learning_curve(steps_array, scores, eps_history, fname)
```
#### File: python_rnd_collection/deep_learning/Reinforce.py
```python
import numpy as np
import torch as T
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import gym
import matplotlib.pyplot as plt
class PolicyNetwork(nn.Module):
def __init__(self, lr, input_dims, n_actions):
super(PolicyNetwork, self).__init__()
        if not T.cuda.is_available():
raise Exception('no cuda')
self.fc1 = nn.Linear(*input_dims, 128)
self.fc2 = nn.Linear(128, 128)
self.fc3 = nn.Linear(128, n_actions)
self.optimizer = optim.Adam(self.parameters(), lr=lr)
self.device = T.device('cuda:0')
self.to(self.device)
def forward(self, state):
x = F.relu(self.fc1(state))
x = F.relu(self.fc2(x))
actions = self.fc3(x)
return actions
class PolicyGradientAgent(object):
def __init__(self, lr, input_dims, gamma=0.99, n_actions=4):
self.gamma = gamma
self.lr = lr
self.reward_memory = []
self.action_memory = []
self.policy = PolicyNetwork(self.lr, input_dims, n_actions)
def choose_action(self, observation):
state = T.Tensor([observation]).to(self.policy.device)
probabilities = F.softmax(self.policy.forward(state), dim=1)
action_probs = T.distributions.Categorical(probabilities)
action = action_probs.sample()
log_probs = action_probs.log_prob(action)
self.action_memory.append(log_probs)
return action.item()
def store_rewards(self, reward):
self.reward_memory.append(reward)
def learn(self):
self.policy.optimizer.zero_grad()
G = np.zeros_like(self.reward_memory)
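        # Monte-Carlo return per step: G_t = sum_k gamma^k * r_{t+k}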
for t in range(len(self.reward_memory)):
G_sum = 0
discount = 1
for k in range(t, len(self.reward_memory)):
G_sum += self.reward_memory[k] * discount
discount *= self.gamma
G[t] = G_sum
G = T.tensor(G, dtype=T.float).to(self.policy.device)
loss = 0
for g, logprob in zip(G, self.action_memory):
loss += -g * logprob
loss.backward()
self.policy.optimizer.step()
self.action_memory = []
self.reward_memory = []
def plot_learning_curve(scores, x, filename):
running_avg = np.zeros(len(scores))
for i in range(len(running_avg)):
running_avg[i] = np.mean(scores[max(0, i-100):(i+1)])
plt.plot(x, running_avg)
plt.title('running average of previous 100 scores')
plt.savefig(filename)
if __name__ == '__main__':
env = gym.make('LunarLander-v2')
n_games = 3000
agent = PolicyGradientAgent(gamma=0.99, lr=0.0005, input_dims=[8], n_actions=4)
fname = 'plots/{}_{}_{}_lr_{}_games.png'.format('REINFORCE', 'LunarLander', str(agent.lr), str(n_games))
scores = []
for i in range(n_games):
done = False
observation = env.reset()
score = 0
while not done:
action = agent.choose_action(observation)
observation_, reward, done, info = env.step(action)
score += reward
agent.store_rewards(reward)
observation = observation_
agent.learn()
scores.append(score)
avg_score = np.mean(scores[-100:])
print('episode', i, 'score %.2f' % score, 'average score %.2f' % avg_score)
x = [i+1 for i in range(len(scores))]
    plot_learning_curve(scores, x, fname)
```
#### File: custom_nodes_python/rbfSolver/rbfSolver.py
```python
import math, sys
import maya.OpenMaya as OpenMaya
import maya.OpenMayaMPx as OpenMayaMPx
kPluginNodeTypeName = "RBFSolver"
rbfNodeId = OpenMaya.MTypeId(0x8800)
"""MATRIX MATH"""
def zeros (rows, cols):
return [[0]*cols for i in range(rows)];
def identity (rows):
M = zeros(rows, rows)
for r in range(rows):
M[r][r] = 1.0
return M
def copyMatrix (M):
return [[v for v in col] for col in M]
def transpose(A):
rowsA = len(A)
colsA = len(A[0])
return [[A[i][j] for i in range(rowsA)] for j in range(colsA)]
def scale(A, scale):
return [[v*scale for v in col] for col in A]
def dot(A, B):
rowsA = len(A)
colsA = len(A[0])
rowsB = len(B)
colsB = len(B[0])
if colsA != rowsB:
raise Exception('Number of A columns must equal number of B rows.')
C = zeros(rowsA, colsB)
for i in range(rowsA):
for j in range(colsB):
C[i][j] = sum([A[i][k] * B[k][j] for k in range(colsA)])
return C
def inverse(A):
rowsA = len(A)
colsA = len(A[0])
if rowsA != colsA:
raise Exception('Matrix must be square')
AM = copyMatrix(A)
IM = identity(rowsA)
for fd in range(rowsA):
fdScaler = 1.0 / AM[fd][fd]
for j in range(rowsA):
AM[fd][j] *= fdScaler
IM[fd][j] *= fdScaler
for i in list(range(rowsA))[0:fd] + list(range(rowsA))[fd+1:]:
crScaler = AM[i][fd]
for j in range(rowsA):
AM[i][j] = AM[i][j] - crScaler * AM[fd][j]
IM[i][j] = IM[i][j] - crScaler * IM[fd][j]
return IM
def solve(A, b):
Inv = inverse(A)
return dot(Inv, b)
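# Quick sanity-check sketch (hypothetical values): solving A x = b for a 2x2 system.
#   A = [[2.0, 0.0], [0.0, 4.0]]
#   b = [[2.0], [8.0]]
#   solve(A, b)  # -> [[1.0], [2.0]]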
"""VECTOR MATH"""
import math
def vAdd(a,b):
return [ia + ib for ia,ib in zip(a,b)]
def vSub(a, b):
    return [ia - ib for ia, ib in zip(a, b)]
def vScale(a, scale):
return [ia * scale for ia in a]
def vDot(a,b):
return sum([ia * ib for ia,ib in zip(a,b)])
def vLength(a):
return math.sqrt(vDot(a,a))
def vNormalize(a):
return vScale(a, 1.0/vLength(a))
"""RBF"""
def gaussian(x, sigma=1):
return math.exp(-x * x / math.pow(sigma, 2.0))
def multiQuadratic(x, sigma=1):
return math.sqrt(1+ (x*sigma)**2)
def inverseQuadratic(x, sigma=1):
return 1.0 / (1.0 + (x*sigma)**2)
def inverseMultiQuadratic(x, sigma=1):
return 1.0 / math.sqrt(1.0 + (x*sigma)**2)
def thinPlate(r):
return r * r * math.log(max(1e-8, r))
def normDist(va, vb):
return vLength(vSub(va,vb))
def dotDist(va, vb):
d = 1.0 - vDot(vNormalize(va), vNormalize(vb))
return d
class RBF (object):
def __init__(self, centers, values, distfunc, kernelfunc ):
"""
centers {matrix} : the list of coordinates
values {matrix} : the list of values for each center
distfunc {function} : the method used to compute the distance between centers
kernelfunc {function} : the method used to compute the kernel value
"""
count = len(centers)
Phi = zeros(count, count)
for i in range(count):
for j in range(i, count):
dist = distfunc(centers[i], centers[j])
Phi[i][j] = Phi[j][i] = kernelfunc(dist)
A = Phi
b = values
self.centers = centers
self.values = values
self.dist = distfunc
self.kernel = kernelfunc
self.coeffs = solve(A, b)
def evaluate(self, centers):
"""
centers {matrix} : the list of coordinates we want to evaluate
"""
Phi = [
[self.kernel(self.dist(value, center)) for center in self.centers]
for value in centers
]
return dot(Phi, self.coeffs)
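# Illustrative usage sketch (values are made up; the node below builds its RBF
# from Maya attributes instead): interpolate 3D values from three known centers.
#   centers = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]
#   values = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
#   rbf = RBF(centers, values, normDist, lambda x: gaussian(x, sigma=1.0))
#   rbf.evaluate([[0.5, 0.5, 0.0]])  # -> one interpolated 3D value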
# Node definition
class RbfSolver(OpenMayaMPx.MPxNode):
# class variables
inputCenters = OpenMaya.MObject()
inputValues = OpenMaya.MObject()
inputEval = OpenMaya.MObject()
inputSigma = OpenMaya.MObject()
inputKernel = OpenMaya.MObject()
inputDistance = OpenMaya.MObject()
inputForceUpdate = OpenMaya.MObject()
output = OpenMaya.MObject()
def __init__(self):
OpenMayaMPx.MPxNode.__init__(self)
self._rbf = None
def shouldSave(self):
return True
def compute(self, plug, dataBlock):
if ( plug == RbfSolver.output ):
recompute = dataBlock.inputValue( RbfSolver.inputForceUpdate ).asBool()
if self._rbf == None or recompute:
self._rbf = None
centers = dataBlock.inputArrayValue( RbfSolver.inputCenters )
values = dataBlock.inputArrayValue( RbfSolver.inputValues )
if centers.elementCount() == values.elementCount():
centersmatrix = []
valuesmatrix = []
kernel = dataBlock.inputValue( RbfSolver.inputKernel ).asInt()
dist = dataBlock.inputValue( RbfSolver.inputDistance ).asInt()
sigma = dataBlock.inputValue( RbfSolver.inputSigma ).asDouble()
                    sigma = max(0.1, sigma)  # clamp sigma so the kernels never get a zero width
for ci in range(centers.elementCount()):
centers.jumpToElement(ci)
values.jumpToElement(ci)
centersmatrix.append ( list(centers.inputValue().asDouble3()) )
valuesmatrix.append ( list(values.inputValue().asDouble3()) )
if centersmatrix:
kernelFunction = lambda x:gaussian(x, sigma)
if kernel == 1:
kernelFunction = lambda x:multiQuadratic(x, sigma)
if kernel == 2:
kernelFunction = lambda x:inverseQuadratic(x, sigma)
if kernel == 3:
kernelFunction = lambda x:inverseMultiQuadratic(x, sigma)
if kernel == 4:
kernelFunction = lambda x:thinPlate(x)
distfunction = normDist
if dist == 1:
distfunction = dotDist
self._rbf = RBF(centersmatrix, valuesmatrix, distfunction, kernelFunction )
if self._rbf :
center = [list(dataBlock.inputValue( RbfSolver.inputEval ).asDouble3())]
result = self._rbf.evaluate(center)
outputHandle = dataBlock.outputValue( RbfSolver.output )
outputHandle.set3Double( *result[0] )
dataBlock.setClean( plug )
# creator
def nodeCreator():
return OpenMayaMPx.asMPxPtr( RbfSolver() )
# initializer
def nodeInitializer():
# input
nAttr = OpenMaya.MFnNumericAttribute()
RbfSolver.inputCenters = nAttr.create( "centers", "cin", OpenMaya.MFnNumericData.k3Double, 0.0 )
nAttr.setArray(1)
nAttr.setStorable(1)
nAttr = OpenMaya.MFnNumericAttribute()
RbfSolver.inputValues = nAttr.create( "values", "vin", OpenMaya.MFnNumericData.k3Double, 0.0 )
nAttr.setArray(1)
nAttr.setStorable(1)
nAttr = OpenMaya.MFnNumericAttribute()
RbfSolver.inputEval = nAttr.create( "input", "in", OpenMaya.MFnNumericData.k3Double, 0.0 )
nAttr.setStorable(1)
nAttr = OpenMaya.MFnNumericAttribute()
RbfSolver.inputForceUpdate = nAttr.create( "update", "update", OpenMaya.MFnNumericData.kBoolean, 0.0 )
nAttr.setStorable(1)
nAttr = OpenMaya.MFnNumericAttribute()
RbfSolver.inputSigma = nAttr.create( "sigma", "sig", OpenMaya.MFnNumericData.kDouble, 1.0 )
nAttr.setStorable(1)
enAttr = OpenMaya.MFnEnumAttribute()
RbfSolver.inputKernel = enAttr.create( "kernel", "k", 0 )
enAttr.addField('Gaussian', 0)
enAttr.addField('Multi Quadratic', 1)
enAttr.addField('Inverse Quadratic', 2)
enAttr.addField('Inverse Multi Quadratic', 3)
enAttr.addField('Thin Plate', 4)
enAttr.setStorable(1)
enAttr = OpenMaya.MFnEnumAttribute()
RbfSolver.inputDistance = enAttr.create( "distance", "dis", 0 )
enAttr.addField('euclidian distance', 0)
enAttr.addField('angular distance', 1)
enAttr.setStorable(1)
# output
nAttr = OpenMaya.MFnNumericAttribute()
RbfSolver.output = nAttr.create( "output", "out", OpenMaya.MFnNumericData.k3Double, 0.0 )
nAttr.setStorable(1)
nAttr.setWritable(1)
# add attributes
RbfSolver.addAttribute( RbfSolver.inputCenters )
RbfSolver.addAttribute( RbfSolver.inputValues )
RbfSolver.addAttribute( RbfSolver.inputEval )
RbfSolver.addAttribute( RbfSolver.inputForceUpdate )
RbfSolver.addAttribute( RbfSolver.inputSigma )
RbfSolver.addAttribute( RbfSolver.inputKernel )
RbfSolver.addAttribute( RbfSolver.inputDistance )
RbfSolver.addAttribute( RbfSolver.output )
RbfSolver.attributeAffects( RbfSolver.inputCenters, RbfSolver.output )
RbfSolver.attributeAffects( RbfSolver.inputValues, RbfSolver.output )
RbfSolver.attributeAffects( RbfSolver.inputEval, RbfSolver.output )
RbfSolver.attributeAffects( RbfSolver.inputSigma, RbfSolver.output )
RbfSolver.attributeAffects( RbfSolver.inputKernel, RbfSolver.output )
RbfSolver.attributeAffects( RbfSolver.inputDistance, RbfSolver.output )
RbfSolver.attributeAffects( RbfSolver.inputForceUpdate, RbfSolver.output )
# initialize the script plug-in
def initializePlugin(mobject):
mplugin = OpenMayaMPx.MFnPlugin(mobject)
try:
mplugin.registerNode( kPluginNodeTypeName, rbfNodeId, nodeCreator, nodeInitializer )
except:
sys.stderr.write( "Failed to register node: %s" % kPluginNodeTypeName )
raise
# uninitialize the script plug-in
def uninitializePlugin(mobject):
mplugin = OpenMayaMPx.MFnPlugin(mobject)
try:
mplugin.deregisterNode( rbfNodeId )
except:
sys.stderr.write( "Failed to register node: %s" % kPluginNodeTypeName )
raise
```
#### File: custom_nodes_python/retargetSolver/CapsuleLink.py
```python
import mtypes as t
class CapsuleLink(object):
def __init__(self, distance, ratioA, ratioB, normalA, normalB, relativePq):
self.distance = distance
self.ratioA = ratioA
self.ratioB = ratioB
self.normalA = normalA
self.normalB = normalB
self.relativePq = relativePq
def __repr__(self):
return """CapsuleLink(
distance={},
ratioA={},
ratioB={},
normalA={},
normalB={},
relativePq={}
)""".format(self.distance, self.ratioA, self.ratioB, self.normalA, self.normalB, self.relativePq)
@classmethod
def gather(cls, capsuleA, capsuleB):
return cls(*(capsuleA.distance(capsuleB)))
def solve(self, capsuleA, capsuleB, weight=1.0, ABRatio=0.0, AOrientationRatio=1.0):
A = t.PosQuat(capsuleA.globalSurfacePosition(self.ratioA, self.normalA), capsuleA.pq.q)
B = t.PosQuat(capsuleB.globalSurfacePosition(self.ratioB, self.normalB), capsuleB.pq.q)
resultA = capsuleA.pq.copy()
resultB = capsuleB.pq.copy()
#compute the target pq
target = A * self.relativePq
localATarget = capsuleA.pq.inverse() * target
localB = capsuleB.pq.inverse() * B
if ABRatio < 0.999 :
#compute how we will move capsuleB so B matches the target
resultB = target * (B.inverse() * capsuleB.pq)
#if the ratio is not 1.0 we will move B a little then compute the motion we have to do on A to reach also the target
if ABRatio > 0.001 :
resultB = t.PosQuat.lerp( capsuleB.pq, resultB, 1.0-ABRatio )
if ABRatio > 0.001 :
#compute how we will move primA so that target matches bPQ
goalB = (resultB * localB)
#check if we want to move only in translation or not
#in that case we change the goalB (to reach) to have the same orientation as what the target is already, so no rotation will happen
if AOrientationRatio < 0.999:
goalB = (resultB * localB)
goalB.q = t.Quaternion.lerp(target.q, goalB.q, AOrientationRatio)
resultA = goalB * ( target.inverse() * capsuleA.pq )
#check that primA has been moved completly and not only on translation
#otherwise we move back the primB to make sure we are solving the constraint
if AOrientationRatio < 0.999:
resultB = (resultA * localATarget) * ( B.inverse() * capsuleB.pq )
#solve weights
resultA = t.PosQuat.lerp( capsuleA.pq, resultA, weight )
resultB = t.PosQuat.lerp( capsuleB.pq, resultB, weight )
return resultA, resultB
```
#### File: custom_nodes_python/retargetSolver/PrimitiveLink.py
```python
import maya.api.OpenMaya as OpenMaya
import mtypes as t
import CapsuleLink as CL
class PrimitiveLink(object):
def __init__(self, primitiveIdA, capsuleIdA, primitiveIdB, capsuleIdB, weight, ABRatio, AOrientationRatio):
self.primitiveIdA = primitiveIdA
self.capsuleIdA = capsuleIdA
self.primitiveIdB = primitiveIdB
self.capsuleIdB = capsuleIdB
self.weight = weight
self.ABRatio = ABRatio
self.AOrientationRatio = AOrientationRatio
self.link = None
def __repr__(self):
return """PrimitiveLink(
primitiveIdA={},
capsuleIdA={},
primitiveIdB={},
capsuleIdB={},
weight={},
ABRatio={},
AOrientationRatio={},
link={}
)""".format(self.primitiveIdA, self.capsuleIdA, self.primitiveIdB, self.capsuleIdB, self.weight, self.ABRatio, self.AOrientationRatio, self.link)
def gather(self, primitives, skeleton):
primitiveA = primitives[self.primitiveIdA]
primitiveB = primitives[self.primitiveIdB]
capsuleA = primitiveA.capsules[self.capsuleIdA].copy()
capsuleB = primitiveB.capsules[self.capsuleIdB].copy()
capsuleA.pq = skeleton.globalPq(primitiveA.boneParent) * capsuleA.pq
capsuleB.pq = skeleton.globalPq(primitiveB.boneParent) * capsuleB.pq
self.link = CL.CapsuleLink.gather(capsuleA, capsuleB)
def solve(self, primitives, skeleton):
primitiveA = primitives[self.primitiveIdA]
primitiveB = primitives[self.primitiveIdB]
capsuleA = primitiveA.capsules[self.capsuleIdA].copy()
capsuleB = primitiveB.capsules[self.capsuleIdB].copy()
capsuleA.pq = skeleton.globalPq(primitiveA.boneParent) * capsuleA.pq
capsuleB.pq = skeleton.globalPq(primitiveB.boneParent) * capsuleB.pq
resultA, resultB = self.link.solve(capsuleA, capsuleB, self.weight, self.ABRatio, self.AOrientationRatio)
return (
resultA * primitiveA.capsules[self.capsuleIdA].pq.inverse(),
resultB * primitiveB.capsules[self.capsuleIdB].pq.inverse()
)
def create_primitive_links_compound(classtype, name):
mComp = OpenMaya.MFnCompoundAttribute()
compound = mComp.create(name, name)
setattr(classtype, name, compound)
mComp.array = True
mComp.storable = True
mComp.writable = True
nAttr = OpenMaya.MFnNumericAttribute()
primitiveA = nAttr.create( name + "PrimitiveA", name + "PrimitiveA", OpenMaya.MFnNumericData.kInt, 0 )
setattr(classtype, name + "PrimitiveA", primitiveA)
nAttr.array = False
nAttr.storable = True
nAttr.writable = True
nAttr = OpenMaya.MFnNumericAttribute()
capsuleA = nAttr.create( name + "CapsuleA", name + "CapsuleA", OpenMaya.MFnNumericData.kInt, 0 )
setattr(classtype, name + "CapsuleA", capsuleA)
nAttr.array = False
nAttr.storable = True
nAttr.writable = True
nAttr = OpenMaya.MFnNumericAttribute()
primitiveB = nAttr.create( name + "PrimitiveB", name + "PrimitiveB", OpenMaya.MFnNumericData.kInt, 0 )
setattr(classtype, name + "PrimitiveB", primitiveB)
nAttr.array = False
nAttr.storable = True
nAttr.writable = True
nAttr = OpenMaya.MFnNumericAttribute()
capsuleB = nAttr.create( name + "CapsuleB", name + "CapsuleB", OpenMaya.MFnNumericData.kInt, 0 )
setattr(classtype, name + "CapsuleB", capsuleB)
nAttr.array = False
nAttr.storable = True
nAttr.writable = True
nAttr = OpenMaya.MFnNumericAttribute()
weight = nAttr.create( name +"Weight",name + "Weight", OpenMaya.MFnNumericData.kDouble, 1.0 )
setattr(classtype, name + "Weight", weight)
nAttr.array = False
nAttr.storable = True
nAttr.writable = True
nAttr = OpenMaya.MFnNumericAttribute()
abRatio = nAttr.create( name +"ABRatio",name + "ABRatio", OpenMaya.MFnNumericData.kDouble, 0.0 )
setattr(classtype, name + "ABRatio", abRatio)
nAttr.array = False
nAttr.storable = True
nAttr.writable = True
nAttr = OpenMaya.MFnNumericAttribute()
aOrientationRatio = nAttr.create( name +"AOrientationRatio",name + "AOrientationRatio", OpenMaya.MFnNumericData.kDouble, 1.0 )
setattr(classtype, name + "AOrientationRatio", aOrientationRatio)
nAttr.array = False
nAttr.storable = True
nAttr.writable = True
classtype.addAttribute(primitiveA)
classtype.addAttribute(capsuleA)
classtype.addAttribute(primitiveB)
classtype.addAttribute(capsuleB)
classtype.addAttribute(weight)
classtype.addAttribute(abRatio)
classtype.addAttribute(aOrientationRatio)
mComp.addChild(primitiveA)
mComp.addChild(capsuleA)
mComp.addChild(primitiveB)
mComp.addChild(capsuleB)
mComp.addChild(weight)
mComp.addChild(abRatio)
mComp.addChild(aOrientationRatio)
classtype.addAttribute(compound)
return compound
def create_primitives_links_from_input(classtype, name, dataBlock):
links = []
linksHandle = dataBlock.inputArrayValue( getattr(classtype, name))
while linksHandle.isDone() == False:
linkHandle = linksHandle.inputValue()
primitiveA = linkHandle.child(getattr(classtype, name + "PrimitiveA")).asInt()
capsuleA = linkHandle.child(getattr(classtype, name + "CapsuleA")).asInt()
primitiveB = linkHandle.child(getattr(classtype, name + "PrimitiveB")).asInt()
capsuleB = linkHandle.child(getattr(classtype, name + "CapsuleB")).asInt()
weight = linkHandle.child(getattr(classtype, name + "Weight")).asDouble()
abRatio = linkHandle.child(getattr(classtype, name + "ABRatio")).asDouble()
aOrientationRatio = linkHandle.child(getattr(classtype, name + "AOrientationRatio")).asDouble()
links.append(PrimitiveLink(
primitiveA,
capsuleA,
primitiveB,
capsuleB,
weight,
abRatio,
aOrientationRatio
))
linksHandle.next()
return links
```
|
{
"source": "JeromeErasmus/browserstack_automate",
"score": 2
}
|
#### File: automate/server/__init__.py
```python
import os
from flask import Flask, render_template
#from flask_login import LoginManager
#from flask_bcrypt import Bcrypt
from flask_debugtoolbar import DebugToolbarExtension
#from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
################
#### config ####
################
app = Flask(
__name__,
template_folder='../client/templates',
static_folder='../client/static'
)
app_settings = 'automate.server.config.DevelopmentConfig'#os.getenv('AUTOMATE_APP_SETTINGS', 'automate.server.config.DevelopmentConfig')
app.config.from_object(app_settings)
####################
#### extensions ####
####################
toolbar = DebugToolbarExtension(app)
#bootstrap = Bootstrap(app)
db = SQLAlchemy(app)
###################
### blueprints ####
###################
#from automate.server.user.views import user_blueprint
from automate.server.main.views import main_blueprint
#app.register_blueprint(user_blueprint)
app.register_blueprint(main_blueprint)
#########################
##### error handlers ####
#########################
@app.errorhandler(404)
def page_not_found(error):
return render_template("errors/404.html"), 404
@app.errorhandler(500)
def server_error_page(error):
return render_template("errors/500.html"), 500
```
#### File: JeromeErasmus/browserstack_automate/manage.py
```python
from subprocess import Popen, PIPE, CalledProcessError
import platform
import os
import unittest
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from automate.server import app, db
from automate.server.models import Project, Config
migrate = Migrate(app, db)
manager = Manager(app)
# migrations
manager.add_command('db', MigrateCommand)
@manager.command
def test():
"""Runs the unit tests without test coverage."""
tests = unittest.TestLoader().discover('automate/tests', pattern='test*.py')
result = unittest.TextTestRunner(verbosity=2).run(tests)
if result.wasSuccessful():
return 0
return 1
@manager.command
def create_db():
"""Creates the db tables."""
db.create_all()
@manager.command
def drop_db():
"""Drops the db tables."""
db.drop_all()
@manager.command
def create_data():
"""Creates sample data."""
pass
@manager.command
def install():
""" Install the app """
create_db()
create_data()
if __name__ == '__main__':
manager.run()
```
|
{
"source": "JeromeErasmus/raycast-aws",
"score": 2
}
|
#### File: raycast-aws/src/rds_describe_db_clusters.py
```python
import sys
import botocore
import botostubs
import boto3
from core.config import AWSConfig
from core.requests import AWSRequests
from core.functions import Functions
config = AWSConfig()
client = config.session.client('rds') # type: botostubs.RDS
table_headers = {
'DBClusterIdentifier': 'DBClusterIdentifier',
'Status': 'Status',
'BackupRetentionPeriod': 'Retention',
'Capacity': 'Capacity',
'EngineMode': 'EngineMode'}
table_columns = {'DBClusterIdentifier', 'Status',
'BackupRetentionPeriod', 'EngineMode', 'Capacity'}
def describe_db_clusters(*args):
response = AWSRequests.send_request(
client.describe_db_clusters
)
items = Functions.search_list(args[0], 'DBClusterIdentifier', response['DBClusters'])
Functions.display(items, table_headers, table_columns)
if len(sys.argv) > 1:
describe_db_clusters(sys.argv[1])
else:
describe_db_clusters(None)
exit(0)
```
#### File: raycast-aws/src/ssm_get_parameter.py
```python
import sys
import botocore
import botostubs
import boto3
from collections import OrderedDict
from core.config import AWSConfig
from core.requests import AWSRequests
from core.functions import Functions, Fontcol
config = AWSConfig()
client = config.session.client('ssm') # type: botostubs.SSM
table_headers = {'Name': 'Name', 'Value': 'Value'}
table_columns = {'Name', 'Value'}
def get_parameter(*args):
response = AWSRequests.send_request(
client.get_parameter,
Name=args[0]
)
Functions.display([response['Parameter']], table_headers, table_columns)
Functions.copyClipboard(response['Parameter']['Value'])
if len(sys.argv) > 1:
get_parameter(sys.argv[1])
else:
get_parameter(None)
exit(0)
```
|
{
"source": "JeromeErasmus/raycast-commands",
"score": 3
}
|
#### File: src/core/config.py
```python
from dotenv import dotenv_values
from github import Github
from jira import JIRA
__all__ = ['CommandsConfig', 'get_github_client', 'get_jira_client']
class CommandsConfig:
config = None
github_client = None
jira_client = None
github_repo = None
github_branch = None
def __init__(self, **kwargs):
self.config = dotenv_values()
self.github_client = Github(self.config['GITHUB_TOKEN'], per_page=30)
self.jira_client = JIRA(server=self.config['JIRA_SERVER'], basic_auth=(self.config['JIRA_USER_EMAIL'],
self.config['JIRA_TOKEN']))
        if kwargs.get('repository'):
            self.github_repo = kwargs['repository']
        else:
            self.github_repo = self.config['GITHUB_DEFAULT_REPOSITORY']
        if kwargs.get('branch'):
            self.github_branch = kwargs['branch']
        else:
            self.github_branch = self.config['GITHUB_DEFAULT_BRANCH']
def get_github_client(self, **kwargs):
"""Gets a Github Client configuration
"""
return self.github_client
def get_jira_client(self, **kwargs):
"""Gets a Jira Client configuration
"""
return self.jira_client
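# Usage sketch (the repository/branch names are placeholders; real values come
# from the .env file read by dotenv_values):
#   cfg = CommandsConfig(repository='owner/repo', branch='main')
#   gh = cfg.get_github_client()
#   jira = cfg.get_jira_client()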
```
#### File: raycast-commands/src/git_create_release.py
```python
import sys
import re
import json
from core.config import CommandsConfig
from datetime import datetime
from itertools import groupby
from jira import JIRA, JIRAError
from github import GithubException
from core.functions import Functions, Fontcol
config = None
github_client = None
jira_client = None
repo = None
def main(*args):
global config, github_client, jira_client, repo
config = CommandsConfig(repository=args[0], branch=args[1])
github_client = config.get_github_client()
jira_client = config.get_jira_client()
repo = get_repository()
last_release = get_last_release()
if not last_release:
print('Error. Previous release not found')
return False
if last_release.prerelease:
print('Previous release is a PreRelease. Publish the previous release')
return False
issues = create_issues_list(last_release)
grouped_issues = group_issues_list(issues)
notes = format_notes(grouped_issues)
create_release(last_release, notes)
def create_issues_list(release):
issues = []
for issue in search_issues(release):
ticket_key = extract_ticket(issue.title)
lable_name = extract_lable(issue.title)
issues.append(dict(
number=issue.number,
title=issue.title,
ticket_key=ticket_key,
lable_name=lable_name,
))
return issues
def group_issues_list(issues):
sorted_issues = sorted(issues, key=sort_key_func)
grouped = dict()
for key, value in groupby(sorted_issues, sort_key_func):
grouped[key] = dict(children=list(value), ticket_key=key)
for key in grouped:
issue = grouped[key]
jira_issue = get_jira_issue(issue['ticket_key'])
if not jira_issue:
issue['valid_issue'] = False
else:
issue['valid_issue'] = True
issue['issue_summary'] = jira_issue.fields.summary
return grouped
def format_notes(issues):
notes = []
for key in issues:
issue = issues[key]
summary = []
numbers = []
for child in issue['children']:
numbers.append(str(child['number']))
if not issue['valid_issue']:
for child in issue['children']:
summary.append(child['lable_name'])
else:
summary = [issue['issue_summary']]
notes.append("#{0} [{1}] {2} \n\r".format(
' #'.join(numbers),
issue['ticket_key'],
', '.join(summary),
)
)
return ''.join(notes)
def get_jira_issue(ticket_key):
try:
issue = jira_client.issue(ticket_key)
if issue:
return issue
except JIRAError as error:
pass
return None
def extract_ticket(string):
ticket_label = re.search(r"(?<=\[)(.*?)(?=\])", string)
if ticket_label:
return ticket_label.group(0).lstrip().rstrip()
else:
return ''
def extract_lable(string):
index = string.rfind(']')
if index != -1:
return string[index+1:].lstrip().capitalize()
else:
return ''
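# Behaviour sketch (hypothetical PR title):
#   extract_ticket('[ABC-123] fix login redirect')  # -> 'ABC-123'
#   extract_lable('[ABC-123] fix login redirect')   # -> 'Fix login redirect'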
def search_issues(release):
date = release.published_at.strftime('%Y-%m-%dT%H:%M:%S')
query = 'repo:{0} type:pr merged:>{1}'.format(config.github_repo, date)
try:
result = github_client.search_issues(query=query)
return result
except GithubException as error:
print(error)
return False
def sort_key_func(k):
return k['ticket_key']
def get_repository():
try:
repo = github_client.get_repo(config.github_repo)
if repo:
return repo
except GithubException as error:
print(error)
return False
def get_last_release():
try:
releases = repo.get_releases()
if releases and releases[0]:
return releases[0]
except GithubException as error:
print(error)
return False
def get_branch_head():
try:
branch = repo.get_branch(config.github_branch)
return branch
except GithubException as error:
print(error)
return False
def create_release(last_release, notes):
if not last_release:
print('Error. Last release not found')
return False
last_tag = last_release.tag_name
m = int(last_tag[last_tag.rfind('.')+1:]) + 1
tag = last_tag[:last_tag.rfind('.')+1] + str(m)
name = "{0}-{1}".format(tag, datetime.today().strftime('%Y-%m-%d'))
try:
release = repo.create_git_release(
tag=tag,
name=name,
prerelease=True,
message=notes,
target_commitish=config.github_branch
)
if release:
print(Fontcol.YELLOW, 'Version: {0}'.format(name))
print('Url: {0}'.format(release.html_url))
print(Fontcol.WHITE, '\n{0}\n{1}'.format('-'*10, notes))
except GithubException as error:
print(error)
return False
if len(sys.argv) > 2:
main(sys.argv[1], sys.argv[2])
else:
    print('Error. Invalid argument count')
exit(0)
```
|
{
"source": "jeromefiot/FEED2TW",
"score": 3
}
|
#### File: FEED2TW/app/classes_feed.py
```python
import feedparser
import tweepy
import time
from datetime import datetime
import random
from threading import Timer
from flask import current_app, flash
from flask.ext.login import current_user
from . import db
from . import infos_tweet
from .models import Feed, Article
# ---------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------
# THREADING TEST
# ---------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------
class RepeatedTimer(object):
"""
Run function (arg or not) every interval seconds
http://stackoverflow.com/questions/3393612/
run-certain-code-every-n-seconds
"""
def __init__(self, interval, function, *args, **kwargs):
self._timer = None
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.is_running = False
self.start()
def _run(self):
self.is_running = False
self.start()
self.function(*self.args, **self.kwargs)
def start(self):
if not self.is_running:
self._timer = Timer(self.interval, self._run)
self._timer.start()
self.is_running = True
def stop(self):
self._timer.cancel()
self.is_running = False
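# Usage sketch (hypothetical callable): run `job` every 30 seconds until stopped.
#   rt = RepeatedTimer(30.0, job)
#   ...
#   rt.stop()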
class RssFlux():
"""
Activate get_articles (and func to deactivate : desactivate_collect).
Activate tweet_articles (and func to deactivate : desactivate_tweet).
functions :
> refresh (default rate = 1800 sec.)
> activate() / deactivate()
> get_articles
> Tweet articles from (self) Feed
"""
def __init__(self, idflux):
"""Connection init."""
self.app = current_app._get_current_object()
self.idflux = idflux
flux_info = Feed.query.filter_by(id=self.idflux).first()
self.name = flux_info.name
self.url = flux_info.url
self.collect_actif = flux_info.collect_actif
self.Tweet_actif = flux_info.Tweet_actif
        # refresh rate for getting articles (28800.0 = 8h)
        self.refresh = 610.0  # every ~10 min
        # self.frequency = (24/flux_info.frequency) * 3600
        self.frequency = 600.0  # every ~10 min
if flux_info.hashtag:
self.hashtag = flux_info.hashtag
else:
self.hashtag = ''
self.rt = None
self.rt2 = None
# thread name
# self.name_Thread = '{0} {1}'.format('thread', idflux)
# print self.name_Thread
def get_articles(self):
"""Get every self.refresh all new artle of feed and insert bdd."""
# repeat in a thread every self.refresh the get_articles function
# self.name_Thread = threading.Timer(self.refresh, self.get_articles).start()
# Timer(self.refresh, self.get_articles).start()
rss = self.url
feeds = feedparser.parse(rss)
with self.app.app_context():
db.session.expunge_all()
# titles list of all articles in bdd
title_articles = [element.title for element in
Article.query.filter(Article.feed_id == self.idflux)]
            # title/link pairs from the last 10 RSS items not yet in the database
feedss = [(feeds.entries[i]['title'], feeds.entries[i]['link'])
for i in range(1, 10)
if feeds.entries[i]['title'] not in title_articles]
            # add the new items from feedss to the database
for elem in feedss:
article = Article(title=elem[0],
url=elem[1],
feed_id=self.idflux)
db.session.add(article)
db.session.commit()
print "SCRAPP ARTICLE EFFECTUE"
def tweet_articles(self):
"""Format and tweet articles from bdd for self.flux."""
with self.app.app_context():
articles_to_tweet = Article.query.\
filter(Article.feed_id == self.idflux).\
filter(Article.tweeted == 0).all()
            # check for articles to tweet
if articles_to_tweet:
auth = tweepy.OAuthHandler(infos_tweet.Key_consumer, infos_tweet.Consumer_secret)
auth.set_access_token(infos_tweet.Access_token, infos_tweet.Access_token_secret)
api = tweepy.API(auth)
try:
for tweets in articles_to_tweet:
# TITLE // LINK -> tweet_content
title = tweets.title[:100]
link_article = tweets.url
# FEED name for VIA -> tweet_content
name_feed = Feed.query.\
filter(Feed.id == Article.feed_id).first()
via_article = name_feed.name.split()[0]
tweet_content = "%s // %s - via %s" %\
(title, link_article, via_article)
                        # mark as tweeted
tweets.tweeted = 1
tweets.date_tweeted = datetime.utcnow()
db.session.commit()
# send it
api.update_status(tweet_content)
# wait randomly
time.sleep(600 + random.randint(30, 60))
print "Tweet ID : "+str(tweets.id)+" : ENVOYE"
# check rate limit
except tweepy.RateLimitError:
print "Rate limite reach...sarace"
time.sleep(16 * 60)
else:
                # no tweets to send; flash() would need a request context here
                print 'No tweets to send'
def activate_get(self):
"""Activate Flux to get Articles."""
print self.collect_actif
if not self.collect_actif:
print "enter activate_get"
self.rt2 = RepeatedTimer(self.refresh, self.get_articles)
# update Feed
flux_info = Feed.query.filter_by(id=self.idflux).first()
flux_info.collect_actif = True
db.session.commit()
print self.rt2
else:
print 'Collect already enable'
def desactivate_get(self):
"""Desactivate Flux to get Articles."""
if self.rt2:
self.rt2.stop()
# update Feed
flux_info = Feed.query.filter_by(id=self.idflux).first()
flux_info.collect_actif = False
db.session.commit()
else:
print 'Collect already disable'
def activate_tweet(self):
"""Activate Flux to get Articles."""
print "State TWEET (Tweet_actif) : "
print self.Tweet_actif
if not self.Tweet_actif:
print "enter activate_tweet"
self.rt = RepeatedTimer(self.frequency, self.tweet_articles)
# update Feed
flux_info = Feed.query.filter_by(id=self.idflux).first()
flux_info.Tweet_actif = True
db.session.commit()
print self.rt
else:
print 'Tweet already enable'
def desactivate_tweet(self):
"""Desactivate Flux to get Articles."""
if self.rt:
self.rt.stop()
# update Feed
flux_info = Feed.query.filter_by(id=self.idflux).first()
flux_info.Tweet_actif = False
db.session.commit()
else:
print 'Tweet already disable'
def state(self):
"""Print effective actions (tweet_articles / get_articles)."""
if self.rt.is_running is True:
if self.rt2.is_running is True:
return self.name+" : Collecting and Tweeting actif."
return self.name+" : Tweeting is actif."
elif self.rt2.is_running is True:
return self.name+" : Collecting is actif."
else:
print 'No actions'
def print_info(self):
self.attrs = vars(self)
print ', '.join("%s: %s" % item for item in self.attrs.items())
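# Usage sketch (feed id 1 is a placeholder; assumes a matching Feed row exists):
#   flux = RssFlux(1)
#   flux.activate_get()     # start collecting articles every flux.refresh seconds
#   flux.activate_tweet()   # start tweeting collected articles every flux.frequency seconds
#   flux.desactivate_get(); flux.desactivate_tweet()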
if __name__ == '__main__':
pass
```
|
{
"source": "jeromefiot/FLASK_base",
"score": 2
}
|
#### File: static/img/views.py
```python
from flask import render_template, redirect, url_for, abort, flash, request, current_app
from flask.ext.login import login_required, current_user
from flask.ext.admin import Admin, BaseView, expose, AdminIndexView
from flask.ext.admin.contrib.sqla import ModelView
from flask.ext.admin.contrib.fileadmin import FileAdmin
from . import main
from ..email import send_email
from .forms import EditProfileForm, EditProfileAdminForm, AddClientForm, ContactForm
from .. import db, admin
from ..models import Role, User, Client, Document
from ..decorators import admin_required
# ---------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------
# PUBLIC PAGES
# ---------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------
@main.route('/')
def index():
return render_template('index.html')
@main.route('/contact', methods=['GET', 'POST'])
def contact():
form = ContactForm()
###################################################
    # TODO: send 2 emails: one to the admin and one to the sender
###################################################
if form.validate_on_submit():
app = current_app._get_current_object()
        # send to the admin
send_email(app.config['FLASKY_ADMIN'], form.titre.data,
'/mail/contact',
envoyeur=form.nom.data,
mail=form.mail.data,
message=form.message.data,
depuis=app.config['FLASKY_MAIL_SUBJECT_PREFIX'])
        # send a thank-you email to the sender
send_email(form.mail.data, 'Confirmation message',
'/mail/merci_contact')
flash('Message bien envoye !')
return redirect(url_for('main.index'))
return render_template('contact.html', form=form)
@main.route('/credits')
def credits():
return render_template('credits.html')
# ---------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------
# REGISTERED USER PAGES
# ---------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------
@main.route('/user/<username>')
@login_required
###################################################
# TODO: restrict access to this specific user only
###################################################
def user(username):
user = User.query.filter_by(username=username).first_or_404()
clients = Client.query.filter_by(user_id=user.id).all()
return render_template('user.html', user=user, clients=clients)
@main.route('/user/clients/<username>')
@login_required
###################################################
# TODO: restrict access to this specific user only
###################################################
def clients_user(username):
user = User.query.filter_by(username=username).first_or_404()
clients = Client.query.filter_by(user_id=user.id).all()
return render_template('clients_user.html', user=user, clients=clients)
@main.route('/user/ajout_client/<username>', methods=['GET', 'POST'])
@login_required
###################################################
# TODO: restrict access to this specific user only
###################################################
def add_user(username):
form = AddClientForm()
user = User.query.filter_by(username=username).first_or_404()
if form.validate_on_submit():
client = Client(nom=form.nom.data,
entreprise=form.entreprise.data,
telephone = form.telephone.data,
mail = form.mail.data,
adresse = form.adresse.data,
codepostal = form.codepostal.data,
user_id=current_user.id)
db.session.add(client)
flash('Client ajoute.')
return redirect(url_for('.clients_user', username=current_user.username))
return render_template('add_user.html', user=user, form=form)
@main.route('/edit-profile', methods=['GET', 'POST'])
@login_required
def edit_profile():
form = EditProfileForm()
if form.validate_on_submit():
current_user.name = form.name.data
current_user.telephone = form.telephone.data
current_user.location = form.location.data
current_user.codepostal = form.codepostal.data
current_user.entreprise = form.entreprise.data
db.session.add(current_user)
flash('Your profile has been updated.')
return redirect(url_for('.user', username=current_user.username))
form.name.data = current_user.name
form.telephone.data = current_user.telephone
form.location.data = current_user.location
form.codepostal.data = current_user.codepostal
form.entreprise.data = current_user.entreprise
return render_template('edit_profile.html', form=form)
# ---------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------
# PAGES @ADMIN_REQUIRED
# ---------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------
@main.route('/edit-profile/<int:id>', methods=['GET', 'POST'])
@login_required
@admin_required
def edit_profile_admin(id):
user = User.query.get_or_404(id)
form = EditProfileAdminForm(user=user)
if form.validate_on_submit():
user.email = form.email.data
user.username = form.username.data
user.confirmed = form.confirmed.data
user.role = Role.query.get(form.role.data)
user.name = form.name.data
user.telephone = form.telephone.data
user.location = form.location.data
user.codepostal = form.codepostal.data
user.entreprise = form.entreprise.data
db.session.add(user)
flash('The profile has been updated.')
return redirect(url_for('.user', username=user.username))
form.email.data = user.email
form.username.data = user.username
form.confirmed.data = user.confirmed
form.role.data = user.role_id
form.name.data = user.name
form.telephone.data = user.telephone
form.location.data = user.location
form.codepostal.data = user.codepostal
form.entreprise.data = user.entreprise
return render_template('edit_profile.html', form=form, user=user)
@main.route('/list_users/', methods=['GET', 'POST'])
@login_required
@admin_required
def list_users():
user = User.query.all()
if request.method == 'POST':
flash("Suppression de id")
return render_template('list_users.html', user=user)
# ---------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------
# ADMIN PAGES
# ---------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------
class UserModelView(ModelView):
# remove "password_hash"
# http://flask-admin.readthedocs.org/en/latest/api/mod_model/#flask.ext.admin.model.BaseModelView)
column_exclude_list = ('password_hash')
# Accessible only by admin
@admin_required
def is_accessible(self):
return current_user.is_administrator()
class ClientModelView(ModelView):
# Accessible only by admin
@admin_required
def is_accessible(self):
return current_user.is_administrator()
class DocumentModelView(ModelView):
# Accessible only by admin
@admin_required
def is_accessible(self):
return current_user.is_administrator()
admin.add_view(UserModelView(User, db.session))
admin.add_view(ClientModelView(Client, db.session))
admin.add_view(DocumentModelView(Document, db.session))
# for the view with all args, otherwise Myview3
#admin.add_view(ModelView(User, db.session))
```
|
{
"source": "jeromefischer/fountain",
"score": 3
}
|
#### File: jeromefischer/fountain/Valve.py
```python
import RPi.GPIO as GPIO
from Logger import logger
class Valve:
def __init__(self, pin, name='Valve'):
self.pin = pin
self.name = name
self.init_gpio()
def init_gpio(self):
logger.info('initialize: {} pin: {}'.format(self.name, self.pin))
GPIO.setmode(GPIO.BCM)
GPIO.setup(self.pin, GPIO.OUT)
def get_valve_status(self):
pass
def set_valve_on(self):
GPIO.output(self.pin, GPIO.HIGH) # Turn valve on
logger.info('valve switched on')
pass
def set_valve_off(self):
GPIO.output(self.pin, GPIO.LOW) # Turn valve off
logger.info('valve switched off')
pass
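# Usage sketch (BCM pin 17 is a placeholder):
#   valve = Valve(pin=17, name='GardenValve')
#   valve.set_valve_on()
#   valve.set_valve_off()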
```
|
{
"source": "jerome-f/polyfun",
"score": 3
}
|
#### File: polyfun/ldsc_polyfun/irwls.py
```python
import numpy as np
from . import jackknife as jk
import logging
class IRWLS(object):
'''
Iteratively re-weighted least squares (FLWS).
Parameters
----------
x : np.matrix with shape (n, p)
Independent variable.
y : np.matrix with shape (n, 1)
Dependent variable.
update_func : function
Transforms output of np.linalg.lstsq to new weights.
n_blocks : int
Number of jackknife blocks (for estimating SE via block jackknife).
w : np.matrix with shape (n, 1)
Initial regression weights (default is the identity matrix). These should be on the
inverse CVF scale.
slow : bool
Use slow block jackknife? (Mostly for testing)
Attributes
----------
est : np.matrix with shape (1, p)
IRWLS estimate.
jknife_est : np.matrix with shape (1, p)
Jackknifed estimate.
jknife_var : np.matrix with shape (1, p)
Variance of jackknifed estimate.
jknife_se : np.matrix with shape (1, p)
Standard error of jackknifed estimate, equal to sqrt(jknife_var).
jknife_cov : np.matrix with shape (p, p)
Covariance matrix of jackknifed estimate.
delete_values : np.matrix with shape (n_blocks, p)
Jackknife delete values.
Methods
-------
wls(x, y, w) :
Weighted Least Squares.
_weight(x, w) :
Weight x by w.
'''
def __init__(self, x, y, update_func, n_blocks, w=None, slow=False, separators=None):
n, p = jk._check_shape(x, y)
if w is None:
w = np.ones_like(y)
if w.shape != (n, 1):
raise ValueError(
'w has shape {S}. w must have shape ({N}, 1).'.format(S=w.shape, N=n))
jknife = self.irwls(
x, y, update_func, n_blocks, w, slow=slow, separators=separators)
self.est = jknife.est
self.jknife_se = jknife.jknife_se
self.jknife_est = jknife.jknife_est
self.jknife_var = jknife.jknife_var
self.jknife_cov = jknife.jknife_cov
self.delete_values = jknife.delete_values
self.separators = jknife.separators
@classmethod
def irwls(cls, x, y, update_func, n_blocks, w, slow=False, separators=None):
'''
Iteratively re-weighted least squares (IRWLS).
Parameters
----------
x : np.matrix with shape (n, p)
Independent variable.
y : np.matrix with shape (n, 1)
Dependent variable.
update_func: function
Transforms output of np.linalg.lstsq to new weights.
n_blocks : int
Number of jackknife blocks (for estimating SE via block jackknife).
w : np.matrix with shape (n, 1)
Initial regression weights.
slow : bool
Use slow block jackknife? (Mostly for testing)
separators : list or None
Block jackknife block boundaries (optional).
Returns
-------
jknife : jk.LstsqJackknifeFast
Block jackknife regression with the final IRWLS weights.
'''
(n, p) = x.shape
if y.shape != (n, 1):
raise ValueError(
'y has shape {S}. y must have shape ({N}, 1).'.format(S=y.shape, N=n))
if w.shape != (n, 1):
raise ValueError(
'w has shape {S}. w must have shape ({N}, 1).'.format(S=w.shape, N=n))
w = np.sqrt(w)
for i in range(2): # update this later
new_w = np.sqrt(update_func(cls.wls(x, y, w)))
if new_w.shape != w.shape:
                logging.info('IRWLS update: new weights shape %s, old weights shape %s', new_w.shape, w.shape)
raise ValueError('New weights must have same shape.')
else:
w = new_w
x = cls._weight(x, w)
y = cls._weight(y, w)
        no_large_chi2 = np.zeros(y.shape[0], dtype=bool)  # IRWLS applies no chi^2 filter of its own
        if slow:
            jknife = jk.LstsqJackknifeSlow(
                x, y, no_large_chi2, n_blocks, separators=separators)
        else:
            jknife = jk.LstsqJackknifeFast(
                x, y, no_large_chi2, n_blocks, separators=separators)
return jknife
@classmethod
def wls(cls, x, y, w):
'''
Weighted least squares.
Parameters
----------
x : np.matrix with shape (n, p)
Independent variable.
y : np.matrix with shape (n, 1)
Dependent variable.
w : np.matrix with shape (n, 1)
Regression weights (1/CVF scale).
Returns
-------
coef : list with four elements (coefficients, residuals, rank, singular values)
Output of np.linalg.lstsq
'''
(n, p) = x.shape
if y.shape != (n, 1):
raise ValueError(
'y has shape {S}. y must have shape ({N}, 1).'.format(S=y.shape, N=n))
if w.shape != (n, 1):
raise ValueError(
'w has shape {S}. w must have shape ({N}, 1).'.format(S=w.shape, N=n))
x = cls._weight(x, w)
y = cls._weight(y, w)
coef = np.linalg.lstsq(x, y)
return coef
@classmethod
def _weight(cls, x, w):
'''
Weight x by w.
Parameters
----------
x : np.matrix with shape (n, p)
Rows are observations.
w : np.matrix with shape (n, 1)
Regression weights (1 / sqrt(CVF) scale).
Returns
-------
x_new : np.matrix with shape (n, p)
x_new[i,j] = x[i,j] * w'[i], where w' is w normalized to have sum 1.
Raises
------
ValueError :
If any element of w is <= 0 (negative weights are not meaningful in WLS).
'''
if np.any(w <= 0):
raise ValueError('Weights must be > 0')
(n, p) = x.shape
if w.shape != (n, 1):
raise ValueError(
'w has shape {S}. w must have shape (n, 1).'.format(S=w.shape))
w = w / float(np.sum(w))
x *= w
return x
```
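A toy run of the weighted least-squares step exposed by the class above (a sketch, not from the repository); the import path assumes the polyfun repository root is on `sys.path`. The full `IRWLS` constructor additionally expects an `update_func` that maps the `np.linalg.lstsq` output to new (n, 1) weights and then runs a block jackknife via the `jackknife` module.
```python
import numpy as np

from ldsc_polyfun.irwls import IRWLS  # assumed import path (polyfun repo root on sys.path)

rng = np.random.RandomState(0)
n = 1000
x = np.hstack([rng.rand(n, 1), np.ones((n, 1))])  # one predictor plus an intercept column
y = x.dot(np.array([[2.0], [1.0]])) + 0.1 * rng.randn(n, 1)
w = np.full((n, 1), 0.5)                          # any positive weights; constant here

coef = IRWLS.wls(x, y, w)[0]  # np.linalg.lstsq output; element 0 holds the coefficients
print(coef.ravel())           # approximately [2.0, 1.0]
```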
#### File: polyfun/ldsc_polyfun/jackknife.py
```python
import numpy as np
from scipy.optimize import nnls
np.seterr(divide='raise', invalid='raise')
from tqdm import tqdm
from sklearn.linear_model import Lasso
import logging
import warnings
warnings.filterwarnings('ignore', message='Coordinate descent with alpha=0 may lead to unexpected results and is discouraged.')
warnings.filterwarnings('ignore', message='Objective did not converge. You might want to increase the number of iterations. Fitting data with very small alpha may cause precision problems.')
from sklearn.metrics import r2_score
def _check_shape(x, y):
'''Check that x and y have the correct shapes (for regression jackknives).'''
if len(x.shape) != 2 or len(y.shape) != 2:
raise ValueError('x and y must be 2D arrays.')
if x.shape[0] != y.shape[0]:
raise ValueError(
'Number of datapoints in x != number of datapoints in y.')
if y.shape[1] != 1:
raise ValueError('y must have shape (n_snp, 1)')
n, p = x.shape
if p > n:
raise ValueError('More dimensions than datapoints.')
return (n, p)
def _check_shape_block(xty_block_values, xtx_block_values):
'''Check that xty_block_values and xtx_block_values have correct shapes.'''
if xtx_block_values.shape[0:2] != xty_block_values.shape:
raise ValueError(
            'Shape of xty_block_values must equal shape of first two dimensions of xtx_block_values.')
if len(xtx_block_values.shape) < 3:
raise ValueError('xtx_block_values must be a 3D array.')
if xtx_block_values.shape[1] != xtx_block_values.shape[2]:
raise ValueError(
'Last two axes of xtx_block_values must have same dimension.')
return xtx_block_values.shape[0:2]
class Jackknife(object):
'''
Base class for jackknife objects. Input involves x,y, so this base class is tailored
for statistics computed from independent and dependent variables (e.g., regressions).
The __delete_vals_to_pseudovalues__ and __jknife__ methods will still be useful for other
sorts of statistics, but the __init__ method will need to be overriden.
Parameters
----------
x : np.matrix with shape (n, p)
Independent variable.
y : np.matrix with shape (n, 1)
Dependent variable.
n_blocks : int
Number of jackknife blocks
*args, **kwargs :
Arguments for inheriting jackknives.
Attributes
----------
n_blocks : int
Number of jackknife blocks
p : int
        Dimensionality of the independent variable
N : int
Number of datapoints (equal to x.shape[0])
Methods
-------
jknife(pseudovalues):
Computes jackknife estimate and variance from the jackknife pseudovalues.
delete_vals_to_pseudovalues(delete_vals, est):
Converts delete values and the whole-data estimate to pseudovalues.
get_separators():
Returns (approximately) evenly-spaced jackknife block boundaries.
'''
def __init__(self, x, y, n_blocks=None, separators=None):
self.N, self.p = _check_shape(x, y)
if separators is not None:
if max(separators) != self.N:
raise ValueError(
'Max(separators) must be equal to number of data points.')
if min(separators) != 0:
                raise ValueError('Min(separators) must be equal to 0.')
self.separators = sorted(separators)
self.n_blocks = len(separators) - 1
elif n_blocks is not None:
self.n_blocks = n_blocks
self.separators = self.get_separators(self.N, self.n_blocks)
else:
            raise ValueError('Must specify either n_blocks or separators.')
if self.n_blocks > self.N:
raise ValueError('More blocks than data points.')
@classmethod
def jknife(cls, pseudovalues):
'''
Converts pseudovalues to jackknife estimate and variance.
Parameters
----------
        pseudovalues : np.matrix of floats with shape (n_blocks, p)
Returns
-------
jknife_est : np.matrix with shape (1, p)
Jackknifed estimate.
jknife_var : np.matrix with shape (1, p)
Variance of jackknifed estimate.
jknife_se : np.matrix with shape (1, p)
Standard error of jackknifed estimate, equal to sqrt(jknife_var).
jknife_cov : np.matrix with shape (p, p)
Covariance matrix of jackknifed estimate.
'''
n_blocks = pseudovalues.shape[0]
jknife_cov = np.atleast_2d(np.cov(pseudovalues.T, ddof=1) / n_blocks)
jknife_var = np.atleast_2d(np.diag(jknife_cov))
jknife_se = np.atleast_2d(np.sqrt(jknife_var))
jknife_est = np.atleast_2d(np.mean(pseudovalues, axis=0))
return (jknife_est, jknife_var, jknife_se, jknife_cov)
@classmethod
def delete_values_to_pseudovalues(cls, delete_values, est):
'''
Converts whole-data estimate and delete values to pseudovalues.
Parameters
----------
delete_values : np.matrix with shape (n_blocks, p)
Delete values.
est : np.matrix with shape (1, p):
Whole-data estimate.
Returns
-------
pseudovalues : np.matrix with shape (n_blocks, p)
            Pseudovalues.
Raises
------
ValueError :
If est.shape != (1, delete_values.shape[1])
'''
n_blocks, p = delete_values.shape
if est.shape != (1, p):
raise ValueError(
'Different number of parameters in delete_values than in est.')
return n_blocks * est - (n_blocks - 1) * delete_values
@classmethod
def get_separators(cls, N, n_blocks):
'''Define evenly-spaced block boundaries.'''
return np.floor(np.linspace(0, N, n_blocks + 1)).astype(int)
class LstsqJackknifeSlow(Jackknife):
'''
Slow linear-regression block jackknife. This class computes delete values directly,
rather than forming delete values from block values. Useful for testing and for
non-negative least squares (which as far as I am aware does not admit a fast block
jackknife algorithm).
Inherits from Jackknife class.
Parameters
----------
x : np.matrix with shape (n, p)
Independent variable.
y : np.matrix with shape (n, 1)
Dependent variable.
n_blocks : int
Number of jackknife blocks
nn: bool
Non-negative least-squares?
Attributes
----------
est : np.matrix with shape (1, p)
FWLS estimate.
jknife_est : np.matrix with shape (1, p)
Jackknifed estimate.
jknife_var : np.matrix with shape (1, p)
Variance of jackknifed estimate.
jknife_se : np.matrix with shape (1, p)
Standard error of jackknifed estimate, equal to sqrt(jknife_var).
jknife_cov : np.matrix with shape (p, p)
Covariance matrix of jackknifed estimate.
delete_vals : np.matrix with shape (n_blocks, p)
Jackknife delete values.
'''
@classmethod
def delete_values(cls, x, y, func, s):
'''
Compute delete values by deleting one block at a time.
Parameters
----------
x : np.matrix with shape (n, p)
Independent variable.
y : np.matrix with shape (n, 1)
Dependent variable.
func : function (n, p) , (n, 1) --> (1, p)
Function of x and y to be jackknived.
s : list of ints
Block separators.
Returns
-------
delete_values : np.matrix with shape (n_blocks, p)
Delete block values (with n_blocks blocks defined by parameter s).
Raises
------
ValueError :
If x.shape[0] does not equal y.shape[0] or x and y are not 2D.
'''
_check_shape(x, y)
d = []
logging.info('Starting non-negative jackknife...')
for i in tqdm(range(len(s) - 1)):
jk_est = func(np.vstack([x[0:s[i], ...], x[s[i + 1]:, ...]]), np.vstack([y[0:s[i], ...], y[s[i + 1]:, ...]]))
d.append(jk_est)
return np.concatenate(d, axis=0)
def __init__(self, x, y, is_large_chi2, n_blocks=None, nn=False, separators=None, chr_num=None, evenodd_split=False, nnls_exact=False):
Jackknife.__init__(self, x, y, n_blocks, separators)
#estimate taus
if nn: # non-negative least squares
if nnls_exact:
self.est = np.atleast_2d(nnls(x, np.array(y).T[0])[0])
else:
xtx = x.T.dot(x)
lasso = Lasso(alpha=1e-100, fit_intercept=False, normalize=False, precompute=xtx, positive=True, max_iter=10000, random_state=0)
self.est = lasso.fit(x,y[:,0]).coef_.reshape((1, x.shape[1]))
else:
self.est = np.atleast_2d(np.linalg.lstsq(x, np.array(y).T[0])[0])
#move large_chi2 SNPs to the end of x and y (don't include them in the separator definition, so that they'll never get removed during jackknife)
if np.any(is_large_chi2):
x_large = x[is_large_chi2]
y_large = y[is_large_chi2]
x = x[~is_large_chi2]
y = y[~is_large_chi2]
Jackknife.__init__(self, x, y, n_blocks, separators)
x = np.concatenate((x,x_large), axis=0)
y = np.concatenate((y,y_large), axis=0)
#jackknife
if nn:
d = []
s = self.separators
for i in tqdm(range(len(s) - 1), disable=False):
x_noblock = np.delete(x, slice(s[i], s[i+1]), axis=0)
y_noblock = np.delete(y, slice(s[i], s[i+1]), axis=0)
if nnls_exact:
jk_est = np.atleast_2d(nnls(x_noblock, y_noblock[:,0])[0])
else:
x_block = x[s[i] : s[i+1]]
xtx_noblock = xtx - x_block.T.dot(x_block)
lasso_noblock = Lasso(alpha=1e-100, fit_intercept=False, normalize=False, precompute=xtx_noblock, positive=True, max_iter=10000, random_state=0)
jk_est = lasso_noblock.fit(x_noblock, y_noblock[:,0]).coef_.reshape((1, x.shape[1]))
###z = nnls(x_noblock, y_noblock[:,0])[0]
###assert np.allclose(z, jk_est[0])
d.append(jk_est)
self.delete_values = np.concatenate(d, axis=0)
        else:
            # Plain least-squares delete values; func mirrors the whole-data estimator above.
            func = lambda a, b: np.atleast_2d(np.linalg.lstsq(a, np.array(b).T[0])[0])
            self.delete_values = self.delete_values(x, y, func, self.separators)
self.pseudovalues = self.delete_values_to_pseudovalues(
self.delete_values, self.est)
(self.jknife_est, self.jknife_var, self.jknife_se, self.jknife_cov) =\
self.jknife(self.pseudovalues)
if evenodd_split:
assert y.shape[1]==1
assert chr_num is not None
assert len(np.unique(chr_num)) > 1
self.chr_list = np.sort(np.unique(chr_num))
self.est_loco = np.empty((len(self.chr_list), x.shape[1]), dtype=np.float32)
for chr_i, left_out_chr in enumerate(tqdm(self.chr_list)):
is_loco = ((chr_num%2)==(left_out_chr%2)) & (chr_num != left_out_chr)
x_loco = x[is_loco]
y_loco = y[is_loco]
self.est_loco[chr_i, :] = nnls(x_loco, y_loco[:,0])[0]
class LstsqJackknifeFast(Jackknife):
def __init__(self, x, y, is_large_chi2, n_blocks=None, separators=None, chr_num=None, evenodd_split=False):
#compute jackknife estimates using all SNPs
Jackknife.__init__(self, x, y, n_blocks, separators)
xty, xtx = self.block_values(x, y, self.separators)
self.est = self.block_values_to_est(xty, xtx)
#compute xtx_tot and xty_tot
xty_tot = np.sum(xty, axis=0)
xtx_tot = np.sum(xtx, axis=0)
#exclude large-chi2 SNPs from xtx and xty for the jackknife
if np.any(is_large_chi2):
x = x[~is_large_chi2]
y = y[~is_large_chi2]
Jackknife.__init__(self, x, y, n_blocks, separators)
xty, xtx = self.block_values(x, y, self.separators)
self.delete_values = self.block_values_to_delete_values(xty, xtx, xtx_tot=xtx_tot, xty_tot=xty_tot)
self.pseudovalues = self.delete_values_to_pseudovalues(
self.delete_values, self.est)
(self.jknife_est, self.jknife_var, self.jknife_se, self.jknife_cov) =\
self.jknife(self.pseudovalues)
if evenodd_split:
assert y.shape[1]==1
assert chr_num is not None
assert len(np.unique(chr_num)) > 1
x_even = x[chr_num %2 == 0]
y_even = y[chr_num %2 == 0]
XTX_even = x_even.T.dot(x_even)
XTy_even = y_even[:,0].dot(x_even)
del x_even, y_even
x_odd = x[chr_num %2 != 0]
y_odd = y[chr_num %2 != 0]
XTX_odd = x_odd.T.dot(x_odd)
XTy_odd = y_odd[:,0].dot(x_odd)
del x_odd, y_odd
assert np.allclose(XTy_even + XTy_odd, y[:,0].dot(x))
assert np.allclose(XTX_even + XTX_odd, x.T.dot(x))
self.chr_list = np.sort(np.unique(chr_num))
#self.est_chr = np.empty((len(self.chr_list), x.shape[1]), dtype=np.float32)
self.est_loco = np.empty((len(self.chr_list), x.shape[1]), dtype=np.float32)
for chr_i, left_out_chr in enumerate(self.chr_list):
x_chr = x[chr_num == left_out_chr]
y_chr = y[chr_num == left_out_chr, 0]
XTX_chr = x_chr.T.dot(x_chr)
XTy_chr = y_chr.dot(x_chr)
if left_out_chr %2 == 0:
XTX_loco = XTX_even - XTX_chr
XTy_loco = XTy_even - XTy_chr
else:
XTX_loco = XTX_odd - XTX_chr
XTy_loco = XTy_odd - XTy_chr
self.est_loco[chr_i, :] = np.linalg.solve(XTX_loco, XTy_loco)
#self.est_chr[chr_i, :] = np.linalg.solve(XTX_chr, XTy_chr)
@classmethod
def block_values_to_delete_values(cls, xty_block_values, xtx_block_values, xtx_tot, xty_tot):
n_blocks, p = _check_shape_block(xty_block_values, xtx_block_values)
delete_values = np.zeros((n_blocks, p))
for j in range(n_blocks):
delete_xty = xty_tot - xty_block_values[j]
delete_xtx = xtx_tot - xtx_block_values[j]
delete_values[j, ...] = np.linalg.solve(
delete_xtx, delete_xty).reshape((1, p))
return delete_values
@classmethod
def block_values(cls, x, y, s):
'''
Compute block values.
Parameters
----------
x : np.matrix with shape (n, p)
Independent variable.
y : np.matrix with shape (n, 1)
Dependent variable.
n_blocks : int
Number of jackknife blocks
s : list of ints
Block separators.
Returns
-------
xty_block_values : np.matrix with shape (n_blocks, p)
Block values of X^T Y.
xtx_block_values : 3d np array with shape (n_blocks, p, p)
Block values of X^T X.
Raises
------
ValueError :
If x.shape[0] does not equal y.shape[0] or x and y are not 2D.
'''
n, p = _check_shape(x, y)
n_blocks = len(s) - 1
xtx_block_values = np.zeros((n_blocks, p, p))
xty_block_values = np.zeros((n_blocks, p))
for i in range(n_blocks):
xty_block_values[i, ...] = np.dot(
x[s[i]:s[i + 1], ...].T, y[s[i]:s[i + 1], ...]).reshape((1, p))
xtx_block_values[i, ...] = np.dot(
x[s[i]:s[i + 1], ...].T, x[s[i]:s[i + 1], ...])
return (xty_block_values, xtx_block_values)
@classmethod
def block_values_to_est(cls, xty_block_values, xtx_block_values):
'''
Converts block values to the whole-data linear regression estimate.
Parameters
----------
xty_block_values : np.matrix with shape (n_blocks, p)
Block values of X^T Y.
xtx_block_values : 3D np.array with shape (n_blocks, p, p)
Block values of X^T X
Returns
-------
est : np.matrix with shape (1, p)
Whole data estimate.
Raises
------
LinAlgError :
If design matrix is singular.
ValueError :
If the last two dimensions of xtx_block_values are not equal or if the first two
dimensions of xtx_block_values do not equal the shape of xty_block_values.
'''
n_blocks, p = _check_shape_block(xty_block_values, xtx_block_values)
xty = np.sum(xty_block_values, axis=0)
xtx = np.sum(xtx_block_values, axis=0)
return np.linalg.solve(xtx, xty).reshape((1, p))
class RatioJackknife(Jackknife):
'''
Block jackknife ratio estimate.
Jackknife.
Parameters
----------
est : float or np.array with shape (1, p)
Whole data ratio estimate
numer_delete_values : np.matrix with shape (n_blocks, p)
Delete values for the numerator.
denom_delete_values: np.matrix with shape (n_blocks, p)
Delete values for the denominator.
Methods
-------
delete_vals_to_pseudovalues(est, denom, num):
Converts denominator/ numerator delete values and the whole-data estimate to
pseudovalues.
Raises
------
FloatingPointError :
If any entry of denom_delete_values is zero.
Note that it is possible for the denominator to cross zero (i.e., be both positive
and negative) and still have a finite ratio estimate and SE, for example if the
numerator is fixed to 0 and the denominator is either -1 or 1. If the denominator
is noisily close to zero, then it is unlikely that the denominator will yield zero
exactly (and therefore yield an inf or nan), but delete values will be of the form
(numerator / close to zero) and -(numerator / close to zero), i.e., (big) and -(big),
and so the jackknife will (correctly) yield huge SE.
'''
def __init__(self, est, numer_delete_values, denom_delete_values):
if numer_delete_values.shape != denom_delete_values.shape:
raise ValueError(
'numer_delete_values.shape != denom_delete_values.shape.')
if len(numer_delete_values.shape) != 2:
raise ValueError('Delete values must be matrices.')
if len(est.shape) != 2 or est.shape[0] != 1 or est.shape[1] != numer_delete_values.shape[1]:
raise ValueError(
'Shape of est does not match shape of delete values.')
self.n_blocks = numer_delete_values.shape[0]
self.est = est
self.pseudovalues = self.delete_values_to_pseudovalues(self.est,
denom_delete_values, numer_delete_values)
(self.jknife_est, self.jknife_var, self.jknife_se, self.jknife_cov) =\
self.jknife(self.pseudovalues)
@classmethod
def delete_values_to_pseudovalues(cls, est, denom, numer):
'''
Converts delete values to pseudovalues.
Parameters
----------
est : np.matrix with shape (1, p)
Whole-data ratio estimate.
denom : np.matrix with shape (n_blocks, p)
Denominator delete values.
numer : np.matrix with shape (n_blocks, p)
Numerator delete values.
Returns
-------
pseudovalues :
Ratio Jackknife Pseudovalues.
Raises
------
ValueError :
If numer.shape != denom.shape.
'''
n_blocks, p = denom.shape
pseudovalues = np.zeros((n_blocks, p))
for j in range(0, n_blocks):
pseudovalues[j, ...] = n_blocks * est - \
(n_blocks - 1) * numer[j, ...] / denom[j, ...]
return pseudovalues
class Jackknife_Ridge(Jackknife):
def __init__(self, x, y, n_blocks=None, separators=None, chr_num=None, verbose=True,
num_lambdas=100, approx_ridge=False,
ridge_lambda=None, use_1se=False, has_intercept=False, standardize=True,
skip_ridge_jackknife=True, num_chr_sets=2):
#sanity checks
assert chr_num is not None
# # # chr_num[:100000]=1
# # # chr_num[100000:]=2
assert len(np.unique(chr_num)) > 1
#init stuff
Jackknife.__init__(self, x, y, n_blocks=n_blocks, separators=separators)
self.use_1se = use_1se
self.verbose=verbose
self.has_intercept = has_intercept
###define chromosome sets
assert num_chr_sets>1
if num_chr_sets == 2:
#Use the good old fashioned odd/even chromosome split
chromosomes = np.sort(np.unique(chr_num))
self.chromosome_sets = []
self.chromosome_sets.append(chromosomes[chromosomes%2==0])
self.chromosome_sets.append(chromosomes[chromosomes%2!=0])
elif num_chr_sets == 22:
self.chromosome_sets = [np.array([c]) for c in range(1,23)]
else:
chr_sizes = np.bincount(chr_num)[1:]
assert num_chr_sets<=len(chr_sizes)
chr_assignments = self._divide_chromosomes_to_sets(chr_sizes, num_chr_sets)
self.chromosome_sets = []
for set_i in range(num_chr_sets):
self.chromosome_sets.append(np.where(chr_assignments==set_i)[0]+1)
#make sure we work with numpy arrays, not dataframes
try: x=x.values
except: pass
try: y=y.values
except: pass
try: constraints=constraints.values
except: pass
try: chr_num=chr_num.values
except: pass
#make y look like a vector
assert y.shape[1]==1
y = y[:,0]
#standardize x
if standardize:
x_l2 = np.sqrt(np.einsum('ij,ij->j', x, x))
x /= x_l2
else:
x_l2 = None
#Create a set of ridge lambdas to evaluate
XTX_all = x.T.dot(x)
XTy_all = y.dot(x)
mean_diag = np.mean(np.diag(XTX_all))
self.ridge_lambdas = np.logspace(np.log10(mean_diag*1e-8), np.log10(mean_diag*1e2), num=num_lambdas)
#find best lambda (using off-chromosome estimation) and estimate taus
if ridge_lambda is not None:
assert self.approx_ridge
best_lambda = ridge_lambda
else:
best_lambda, r2_best_lambda = self._find_best_lambda(x, y, XTX_all, XTy_all, chr_num)
self.est = np.atleast_2d(self._est_ridge(XTX_all, XTy_all, best_lambda))
self.r2_best_lambda = r2_best_lambda
if standardize:
self.est /= x_l2
#LOCO (leave one chromosome out) computations
self.est_chr_lstsq, self.est_chr_ridge, self.est_loco_lstsq, self.est_loco_ridge = \
self._est_taus_loco(x, y, XTX_all, XTy_all, chr_num, best_lambda, standardize, x_l2)
#run jackknife
if not skip_ridge_jackknife:
self.delete_values = np.empty((len(self.separators)-1, self.est.shape[1]), dtype=np.float32)
self.est_chr_lstsq_jk_list = []
self.est_chr_ridge_jk_list = []
self.est_loco_lstsq_jk_list = []
self.est_loco_ridge_jk_list = []
logging.info('Running ridge jackknife...')
self.best_r2_jk_noblock = np.zeros(len(self.separators) - 1)
for block_i in tqdm(range(len(self.separators) - 1)):
#prepare data structures
x_block = x[self.separators[block_i]:self.separators[block_i+1], ...]
y_block = y[self.separators[block_i]:self.separators[block_i+1], ...]
XTX_noblock = XTX_all - x_block.T.dot(x_block)
XTy_noblock = XTy_all - y_block.dot(x_block)
slice_block = slice(self.separators[block_i], self.separators[block_i+1])
x_noblock = np.delete(x, slice_block, axis=0)
y_noblock = np.delete(y, slice_block, axis=0)
chr_noblock = np.delete(chr_num, slice_block, axis=0)
#find best lambda for this jackknife block
if approx_ridge:
best_lambda_noblock = best_lambda
else:
best_lambda_noblock, r2_noblock = self._find_best_lambda(x_noblock, y_noblock, XTX_noblock, XTy_noblock, chr_noblock)
self.best_r2_jk_noblock[block_i] = r2_noblock
#main jackknife estimation
est_block = self._est_ridge(XTX_noblock, XTy_noblock, best_lambda_noblock)
self.delete_values[block_i, ...] = est_block
#jackknife LOCO computation
est_chr_lstsq, est_chr_ridge, est_loco_lstsq, est_loco_ridge = \
self._est_taus_loco(x_noblock, y_noblock, XTX_noblock, XTy_noblock,
chr_noblock, best_lambda_noblock, standardize, x_l2)
self.est_chr_lstsq_jk_list.append(est_chr_lstsq)
self.est_chr_ridge_jk_list.append(est_chr_ridge)
self.est_loco_lstsq_jk_list.append(est_loco_lstsq)
self.est_loco_ridge_jk_list.append(est_loco_ridge)
if standardize: self.delete_values /= x_l2
#compute jackknife pseudo-values
self.pseudovalues = self.delete_values_to_pseudovalues(self.delete_values, self.est)
(self.jknife_est, self.jknife_var, self.jknife_se, self.jknife_cov) = self.jknife(self.pseudovalues)
#restore original x
if standardize: x *= x_l2
def _divide_chromosomes_to_sets(self, chr_sizes, num_sets):
chr_order = np.argsort(chr_sizes)[::-1] #np.arange(len(chr_sizes))
chr_assignments = np.zeros(22, dtype=np.int) - 1
chr_assignments[chr_order[:num_sets]] = np.arange(num_sets)
set_sizes = chr_sizes[chr_order[:num_sets]].copy()
for c_i in chr_order[num_sets : len(chr_sizes)]:
smallest_set = np.argmin(set_sizes)
chr_assignments[c_i] = smallest_set
set_sizes[smallest_set] += chr_sizes[c_i]
assert set_sizes.sum() == chr_sizes.sum()
return chr_assignments
def _est_taus_loco(self, x, y, XTX, XTy, chr_num, ridge_lambda, standardize, x_l2=None, reestimate_lambda=False):
chromosomes = np.sort(np.unique(chr_num))
est_set_lstsq = np.empty((len(self.chromosome_sets), x.shape[1]), dtype=np.float32)
est_noset_lstsq = np.empty((len(self.chromosome_sets), x.shape[1]), dtype=np.float32)
est_set_ridge = np.empty((len(self.chromosome_sets), x.shape[1]), dtype=np.float32)
est_noset_ridge = np.empty((len(self.chromosome_sets), x.shape[1]), dtype=np.float32)
tqdm_chr_sets = tqdm(self.chromosome_sets)
logging.info('Estimating annotation coefficients for each chromosomes set')
for set_i, chromosome_set in enumerate(tqdm_chr_sets):
is_in_set = np.isin(chr_num, chromosome_set)
if not np.any(is_in_set): continue
x_set = x[is_in_set]
y_set = y[is_in_set]
XTX_set = x_set.T.dot(x_set)
XTy_set = y_set.dot(x_set)
XTX_noset = XTX - XTX_set
XTy_noset = XTy - XTy_set
if (not reestimate_lambda) or (len(chromosomes) <= 2):
best_lambda_noset = ridge_lambda
best_lambda_set = ridge_lambda
else:
x_loco = x[~is_in_set]
y_loco = y[~is_in_set]
chr_loco = chr_num[~is_in_set]
best_lambda_noset, r2_noset = self._find_best_lambda(x_loco, y_loco, XTX_noset, XTy_noset, chr_loco)
if len(chromosome_set) == 1:
best_lambda_set = ridge_lambda
else:
best_lambda_set, r2_set = self._find_best_lambda(x_set, y_set, XTX_set, XTy_set, chr_num[is_in_set])
est_set_lstsq[set_i, :] = self._est_ridge(XTX_set, XTy_set, ridge_lambda=0)
est_set_ridge[set_i, :] = self._est_ridge(XTX_set, XTy_set, best_lambda_set)
est_noset_lstsq[set_i, :] = self._est_ridge(XTX_noset, XTy_noset, ridge_lambda=0)
est_noset_ridge[set_i, :] = self._est_ridge(XTX_noset, XTy_noset, best_lambda_noset)
###import ipdb; ipdb.set_trace()
if standardize:
est_set_lstsq /= x_l2
est_set_ridge /= x_l2
est_noset_lstsq /= x_l2
est_noset_ridge /= x_l2
return est_set_lstsq, est_set_ridge, est_noset_lstsq, est_noset_ridge
def _find_best_lambda(self, x, y, XTX, XTy, chr_num):
chromosomes = np.sort(np.unique(chr_num))
assert len(chromosomes) > 1
num_lambdas = len(self.ridge_lambdas)
y_pred_lambdas = np.empty((chr_num.shape[0], num_lambdas), dtype=np.float32)
if self.verbose:
y_pred_lambdas_lstsq = np.empty(chr_num.shape[0], dtype=np.float32)
logging.info('iterating over chromosomes to compute XTX, XTy...')
for chr_i, left_out_chr in enumerate(tqdm(chromosomes)):
is_chr = (chr_num == left_out_chr)
chr_inds = np.where(is_chr)[0]
assert np.all(chr_inds == np.arange(chr_inds[0], chr_inds[-1]+1))
chr_slice = slice(chr_inds[0], chr_inds[-1]+1)
x_chr = x[chr_slice]
y_chr = y[chr_slice]
XTX_loco = XTX - x_chr.T.dot(x_chr)
XTy_loco = XTy - y_chr.dot(x_chr)
y_pred_lambdas[chr_slice, :] = self._predict_lambdas(XTX_loco, XTy_loco, x_chr)
if self.verbose:
tau_lstsq_loco = self._est_ridge(XTX_loco, XTy_loco, 0)
y_pred_lambdas_lstsq[chr_slice] = x_chr.dot(tau_lstsq_loco)
#Assign an r2 score to each lambda
score_lambdas = np.empty(num_lambdas, dtype=np.float32)
logging.info('Evaluating Ridge lambdas...')
for r_i in tqdm(range(num_lambdas)):
score_lambdas[r_i] = r2_score(y, y_pred_lambdas[:,r_i])
        #choose the best-scoring lambda, optionally refining it with the 1SE rule
        best_lambda_index = np.argmax(score_lambdas)
        if self.use_1se:
            score_folds = np.empty(len(chromosomes), dtype=np.float32)
            for chr_i, left_out_chr in enumerate(chromosomes):
                is_chr = (chr_num == left_out_chr)
                score_folds[chr_i] = r2_score(y[is_chr], y_pred_lambdas[is_chr, best_lambda_index])
            scores_std = np.std(score_folds)
            best_score = score_lambdas[best_lambda_index]
            assert np.isclose(best_score, np.max(score_lambdas))
            best_lambda_index = np.where(score_lambdas > best_score - scores_std)[0][-1]
best_lambda = self.ridge_lambdas[best_lambda_index]
if self.verbose:
score_lstsq = r2_score(y, y_pred_lambdas_lstsq)
logging.info('Selected ridge lambda: %0.4e (%d/%d) score: %0.4e score lstsq: %0.4e'%(best_lambda,
best_lambda_index+1, num_lambdas, score_lambdas[best_lambda_index], score_lstsq))
return best_lambda, score_lambdas[best_lambda_index]
def _predict_lambdas(self, XTX_train, XTy_train, X_validation):
tau_est_ridge = np.empty((XTX_train.shape[0], len(self.ridge_lambdas)), dtype=np.float32)
for r_i, r in enumerate(self.ridge_lambdas):
tau_est_ridge[:, r_i] = self._est_ridge(XTX_train, XTy_train, r)
y_pred = X_validation.dot(tau_est_ridge)
return y_pred
def _est_ridge(self, XTX, XTy, ridge_lambda):
I = np.eye(XTX.shape[0]) * ridge_lambda
if self.has_intercept: I[-1,-1]=0
return np.linalg.solve(XTX+I, XTy)
```
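A hedged toy run of the fast block jackknife above on simulated regression data (the import path is an assumption; the polyfun repository root must be on `sys.path` with its dependencies installed). Each pseudovalue is `n_blocks * est - (n_blocks - 1) * delete_value`, and the jackknife standard error is the standard deviation of the pseudovalues divided by `sqrt(n_blocks)`.
```python
import numpy as np

from ldsc_polyfun.jackknife import LstsqJackknifeFast  # assumed import path

rng = np.random.RandomState(1)
n, p = 5000, 3
x = rng.randn(n, p)
y = x.dot(np.array([[0.5], [0.0], [-0.2]])) + rng.randn(n, 1)
is_large_chi2 = np.zeros(n, dtype=bool)  # no outlier SNPs flagged in this toy example

jknife = LstsqJackknifeFast(x, y, is_large_chi2, n_blocks=200)
print(jknife.est)        # whole-data least-squares estimate, shape (1, 3)
print(jknife.jknife_se)  # block-jackknife standard errors, shape (1, 3)
```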
#### File: polyfun/ldsc_polyfun/sumstats.py
```python
import numpy as np
import pandas as pd
from scipy import stats
import itertools as it
from . import parse as ps
from . import regressions as reg
import sys
import traceback
import copy
import os
_N_CHR = 22
# complementary bases
COMPLEMENT = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
# bases
BASES = list(COMPLEMENT.keys())
# true iff strand ambiguous
STRAND_AMBIGUOUS = {''.join(x): x[0] == COMPLEMENT[x[1]]
for x in it.product(BASES, BASES)
if x[0] != x[1]}
# SNPS we want to keep (pairs of alleles)
VALID_SNPS = {x for x in [''.join(y) for y in it.product(BASES, BASES)]
if x[0] != x[1] and not STRAND_AMBIGUOUS[x]}
# T iff SNP 1 has the same alleles as SNP 2 (allowing for strand or ref allele flip).
MATCH_ALLELES = {x for x in [''.join(y) for y in it.product(VALID_SNPS, VALID_SNPS)]
# strand and ref match
if ((x[0] == x[2]) and (x[1] == x[3])) or
# ref match, strand flip
((x[0] == COMPLEMENT[x[2]]) and (x[1] == COMPLEMENT[x[3]])) or
# ref flip, strand match
((x[0] == x[3]) and (x[1] == x[2])) or
((x[0] == COMPLEMENT[x[3]]) and (x[1] == COMPLEMENT[x[2]]))} # strand and ref flip
# T iff SNP 1 has the same alleles as SNP 2 w/ ref allele flip.
FLIP_ALLELES = {''.join(x):
((x[0] == x[3]) and (x[1] == x[2])) or # strand match
# strand flip
((x[0] == COMPLEMENT[x[3]]) and (x[1] == COMPLEMENT[x[2]]))
for x in MATCH_ALLELES}
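# Worked example of the 4-character allele-pair encoding above (comment added for clarity,
# not in the original source): 'ACAC' means SNP1 has alleles A/C and SNP2 has alleles A/C,
# so the pair is in MATCH_ALLELES and FLIP_ALLELES['ACAC'] is False (no sign flip);
# 'ACCA' (SNP2 reported with the reference allele swapped) also matches, but
# FLIP_ALLELES['ACCA'] is True, so Z2 is multiplied by -1 in _align_alleles;
# 'ACAG' is not in MATCH_ALLELES, so _filter_alleles drops that SNP.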
def _splitp(fstr):
flist = fstr.split(',')
flist = [os.path.expanduser(os.path.expandvars(x)) for x in flist]
return flist
def _select_and_log(x, ii, log, msg):
    '''Filter down to rows that are True in ii. Log # of SNPs removed.'''
new_len = ii.sum()
if new_len == 0:
raise ValueError(msg.format(N=0))
else:
x = x[ii]
log.log(msg.format(N=new_len))
return x
def smart_merge(x, y):
'''Check if SNP columns are equal. If so, save time by using concat instead of merge.'''
assert len(set(x.columns).intersection(set(y.drop(columns=['SNP']).columns))) == 0
if len(x) == len(y) and (x.index == y.index).all() and (x.SNP == y.SNP).all():
#x = x.reset_index(drop=True)
#y = y.reset_index(drop=True).drop(columns=['SNP'])
#out = pd.concat([x, y], axis=1)
out = pd.concat([x, y.drop(columns=['SNP'])], axis=1)
else:
if x.index.name == 'snpid' and y.index.name == 'snpid':
out = pd.merge(x, y.drop(columns=['SNP']), how='inner', left_index=True, right_index=True)
else:
out = pd.merge(x, y, how='inner', on='SNP')
return out
def _read_ref_ld(args, log):
'''Read reference LD Scores.'''
ref_ld = _read_chr_split_files(args.ref_ld_chr, args.ref_ld, log,
'reference panel LD Score', ps.ldscore_fromlist)
log.log(
'Read reference panel LD Scores for {N} SNPs.'.format(N=len(ref_ld)))
return ref_ld
def _read_annot(args, log):
'''Read annot matrix.'''
if (args.anno is not None): annotations = args.anno.split(',')
else: annotations = None
try:
if args.ref_ld is not None:
overlap_matrix, M_tot = _read_chr_split_files(args.ref_ld_chr, args.ref_ld, log,
'annot matrix', ps.annot, frqfile=args.frqfile, anno=annotations)
elif args.ref_ld_chr is not None:
overlap_matrix, M_tot = _read_chr_split_files(args.ref_ld_chr, args.ref_ld, log,
'annot matrix', ps.annot, frqfile=args.frqfile_chr, anno=annotations)
except Exception:
log.log('Error parsing .annot file.')
raise
return overlap_matrix, M_tot
def _read_M(args, log, n_annot):
'''Read M (--M, --M-file, etc).'''
if args.M:
try:
M_annot = [float(x) for x in _splitp(args.M)]
except ValueError as e:
raise ValueError('Could not cast --M to float: ' + str(e.args))
else:
if args.ref_ld:
M_annot = ps.M_fromlist(
_splitp(args.ref_ld), common=(not args.not_M_5_50))
elif args.ref_ld_chr:
M_annot = ps.M_fromlist(
_splitp(args.ref_ld_chr), _N_CHR, common=(not args.not_M_5_50))
try:
M_annot = np.array(M_annot).reshape((1, n_annot))
except ValueError as e:
raise ValueError(
'# terms in --M must match # of LD Scores in --ref-ld.\n' + str(e.args))
return M_annot
def _read_w_ld(args, log):
'''Read regression SNP LD.'''
if (args.w_ld and ',' in args.w_ld) or (args.w_ld_chr and ',' in args.w_ld_chr):
raise ValueError(
'--w-ld must point to a single fileset (no commas allowed).')
w_ld = _read_chr_split_files(args.w_ld_chr, args.w_ld, log,
'regression weight LD Score', ps.ldscore_fromlist)
w_ld.drop(['CHR'], axis=1, inplace=True)
if len(w_ld.columns) != 2:
raise ValueError('--w-ld may only have one LD Score column.')
w_ld.columns = ['SNP', 'LD_weights'] # prevent colname conflicts w/ ref ld
log.log(
'Read regression weight LD Scores for {N} SNPs.'.format(N=len(w_ld)))
return w_ld
def _read_chr_split_files(chr_arg, not_chr_arg, log, noun, parsefunc, **kwargs):
'''Read files split across 22 chromosomes (annot, ref_ld, w_ld).'''
try:
if not_chr_arg:
log.log('Reading {N} from {F} ...'.format(F=not_chr_arg, N=noun))
out = parsefunc(_splitp(not_chr_arg), **kwargs)
elif chr_arg:
f = ps.sub_chr(chr_arg, '[1-22]')
log.log('Reading {N} from {F} ...'.format(F=f, N=noun))
out = parsefunc(_splitp(chr_arg), _N_CHR, **kwargs)
except ValueError as e:
log.log('Error parsing {N}.'.format(N=noun))
raise e
return out
def _read_sumstats(args, log, fh, alleles=True, dropna=False):
'''Parse summary statistics.'''
log.log('Reading summary statistics from {S} ...'.format(S=fh))
sumstats = ps.sumstats(fh, alleles=alleles, dropna=dropna)
log_msg = 'Read summary statistics for {N} SNPs.'
log.log(log_msg.format(N=len(sumstats)))
if np.any(sumstats.index.duplicated()):
m = len(sumstats)
sumstats = sumstats.loc[~sumstats.index.duplicated()]
log.log('Dropped {M} duplicated SNPs.'.format(M=m - len(sumstats)))
return sumstats
def _check_ld_condnum(args, log, ref_ld):
'''Check condition number of LD Score matrix.'''
if len(ref_ld.shape) >= 2:
cond_num = int(np.linalg.cond(ref_ld))
if cond_num > 100000:
if args.invert_anyway:
warn = "WARNING: LD Score matrix condition number is {C}. "
warn += "Inverting anyway because the --invert-anyway flag is set."
log.log(warn.format(C=cond_num))
else:
warn = "WARNING: LD Score matrix condition number is {C}. "
warn += "Remove collinear LD Scores. "
raise ValueError(warn.format(C=cond_num))
def _check_variance(log, M_annot, ref_ld):
'''Remove zero-variance LD Scores.'''
###ii = ref_ld.iloc[:, 2:].var(axis=0) == 0 # NB there is a SNP and CHR column here
ii = np.array([(ref_ld[c].var() == 0) for c in ref_ld.columns[2:]]) #This command uses way way less memory
if ii.all():
raise ValueError('All LD Scores have zero variance.')
elif ii.any():
log.log('Removing partitioned LD Scores with zero variance: %s'%(','.join(ref_ld.columns[2:][ii])))
ii_snp = np.array([True, True] + list(~ii))
ii_m = np.array(~ii)
ref_ld = ref_ld.loc[:, ii_snp]
M_annot = M_annot[:, ii_m]
return M_annot, ref_ld, ii
def _warn_length(log, sumstats):
if len(sumstats) < 200000:
log.log(
'WARNING: number of SNPs less than 200k; this is almost always bad.')
def _print_cov(ldscore_reg, ofh, log):
'''Prints covariance matrix of slopes.'''
log.log(
'Printing covariance matrix of the estimates to {F}.'.format(F=ofh))
np.savetxt(ofh, ldscore_reg.coef_cov)
def _print_delete_values(ldscore_reg, ofh, log):
'''Prints block jackknife delete-k values'''
log.log('Printing block jackknife delete values to {F}.'.format(F=ofh))
np.savetxt(ofh, ldscore_reg.tot_delete_values)
def _print_part_delete_values(ldscore_reg, ofh, log):
'''Prints partitioned block jackknife delete-k values'''
log.log('Printing partitioned block jackknife delete values to {F}.'.format(F=ofh))
np.savetxt(ofh, ldscore_reg.part_delete_values)
def _merge_and_log(ld, sumstats, noun, log):
'''Wrap smart merge with log messages about # of SNPs.'''
sumstats = smart_merge(ld, sumstats)
msg = 'After merging with {F}, {N} SNPs remain.'
if len(sumstats) == 0:
msg += ' Please make sure that your annotation files include the SNPs in your sumstats files (please see the PolyFun wiki for details on downloading functional annotations)'
raise ValueError(msg.format(N=len(sumstats), F=noun))
else:
log.log(msg.format(N=len(sumstats), F=noun))
return sumstats
def _read_ld_sumstats(args, log, fh, alleles=True, dropna=True):
sumstats = _read_sumstats(args, log, fh, alleles=alleles, dropna=dropna)
ref_ld = _read_ref_ld(args, log)
n_annot = len(ref_ld.columns) - 2 #Changed to -2 because we also have chromosome column now
M_annot = _read_M(args, log, n_annot)
#keep only requested annotations if --anno was specified
if args.anno is not None:
cols_to_keep = np.zeros(len(ref_ld.columns), dtype=np.bool)
annotations = args.anno.split(',')
is_found1 = np.isin(annotations, ref_ld.columns.str[:-2])
is_found2 = np.isin(annotations, ref_ld.columns.str[:-4])
is_found = is_found1 | is_found2
if np.any(~is_found):
raise ValueError('Not all annotations specified with --anno are found in the LD scores file')
cols_to_keep = (ref_ld.columns.str[:-2].isin(annotations)) | (ref_ld.columns.str[:-4].isin(annotations)) | (ref_ld.columns.isin(['CHR', 'SNP']))
assert np.sum(cols_to_keep) == len(annotations)+2
cols_nochrsnp = ref_ld.drop(columns=['CHR', 'SNP']).columns
M_cols_to_keep = (cols_nochrsnp.str[:-2].isin(annotations)) | (cols_nochrsnp.str[:-4].isin(annotations))
assert np.sum(M_cols_to_keep) == len(annotations)
ref_ld = ref_ld.loc[:, cols_to_keep]
M_annot = M_annot[:, M_cols_to_keep]
log.log('Keeping only annotations specified with --anno')
M_annot, ref_ld, novar_cols = _check_variance(log, M_annot, ref_ld)
w_ld = _read_w_ld(args, log)
sumstats = _merge_and_log(ref_ld, sumstats, 'reference panel LD', log)
sumstats = _merge_and_log(sumstats, w_ld, 'regression SNP LD', log)
w_ld_cname = sumstats.columns[-1]
ref_ld_cnames = ref_ld.drop(columns=['CHR', 'SNP']).columns
return M_annot, w_ld_cname, ref_ld_cnames, sumstats, novar_cols
def estimate_h2(args, log):
'''Estimate h2 and partitioned h2.'''
args = copy.deepcopy(args)
if args.samp_prev is not None and args.pop_prev is not None:
args.samp_prev, args.pop_prev = list(map(
float, [args.samp_prev, args.pop_prev]))
if args.intercept_h2 is not None:
args.intercept_h2 = float(args.intercept_h2)
if args.no_intercept:
args.intercept_h2 = 1
M_annot, w_ld_cname, ref_ld_cnames, sumstats, novar_cols = _read_ld_sumstats(
args, log, args.h2)
ref_ld = np.array(sumstats[ref_ld_cnames], dtype=np.float32)
if not args.skip_cond_check:
        _check_ld_condnum(args, log, ref_ld)
_warn_length(log, sumstats)
n_snp = len(sumstats)
n_blocks = min(n_snp, args.n_blocks)
n_annot = len(ref_ld_cnames)
chisq_max = args.chisq_max
old_weights = False
if n_annot == 1:
if args.two_step is None and args.intercept_h2 is None:
args.two_step = 30
else:
old_weights = True
if args.chisq_max is None:
chisq_max = max(0.001*sumstats.N.max(), args.max_chi2)
s = lambda x: np.array(x).reshape((n_snp, 1))
chisq = s(sumstats.Z**2).astype(np.float32)
if chisq_max is not None and not args.keep_large:
ii = np.ravel(chisq < chisq_max)
sumstats = sumstats.loc[ii, :]
log.log('Removed {M} SNPs with chi^2 > {C} ({N} SNPs remain)'.format(
C=chisq_max, N=np.sum(ii), M=n_snp-np.sum(ii)))
n_snp = np.sum(ii) # lambdas are late-binding, so this works
ref_ld = np.array(sumstats[ref_ld_cnames], dtype=np.float32)
chisq = chisq[ii].reshape((n_snp, 1))
if args.two_step is not None:
log.log('Using two-step estimator with cutoff at {M}.'.format(M=args.two_step))
hsqhat = reg.Hsq(chisq, ref_ld, s(sumstats[w_ld_cname]), s(sumstats.N),
M_annot, n_blocks=n_blocks, intercept=args.intercept_h2,
twostep=args.two_step, old_weights=old_weights,
chr_num=sumstats['CHR'],
loco=args.loco, ridge_lambda=args.ridge_lambda,
standardize_ridge=not args.no_standardize_ridge,
approx_ridge=not args.reestimate_lambdas,
skip_ridge_jackknife=not args.ridge_jackknife,
num_chr_sets = args.num_chr_sets,
evenodd_split=args.evenodd_split,
nn=args.nn,
keep_large=args.keep_large,
nnls_exact=args.nnls_exact
)
if args.print_cov:
_print_cov(hsqhat, args.out + '.cov', log)
if args.print_delete_vals:
_print_delete_values(hsqhat, args.out + '.delete', log)
_print_part_delete_values(hsqhat, args.out + '.part_delete', log)
#save ridge-regression lambdas if possible
if args.loco and args.ridge_jackknife and args.reestimate_lambdas:
np.savetxt(args.out+'.out_of_chrom_r2.txt', [hsqhat.jknife_ridge.r2_best_lambda])
df = pd.Series(hsqhat.jknife_ridge.best_r2_jk_noblock)
df.to_csv(args.out+'.out_of_chrom_r2_jk.txt', index=False, header=False)
log.log(hsqhat.summary(ref_ld_cnames, P=args.samp_prev, K=args.pop_prev, overlap = args.overlap_annot))
if args.overlap_annot:
overlap_matrix, M_tot = _read_annot(args, log)
# overlap_matrix = overlap_matrix[np.array(~novar_cols), np.array(~novar_cols)]#np.logical_not
df_results = hsqhat._overlap_output(ref_ld_cnames, overlap_matrix, M_annot, M_tot, args.print_coefficients)
df_results.to_csv(args.out+'.results', sep="\t", index=False, na_rep='NA', float_format='%0.4e')
log.log('Results printed to '+args.out+'.results')
return hsqhat
def estimate_rg(args, log):
'''Estimate rg between trait 1 and a list of other traits.'''
args = copy.deepcopy(args)
rg_paths, rg_files = _parse_rg(args.rg)
n_pheno = len(rg_paths)
f = lambda x: _split_or_none(x, n_pheno)
args.intercept_h2, args.intercept_gencov, args.samp_prev, args.pop_prev = list(map(f,
(args.intercept_h2, args.intercept_gencov, args.samp_prev, args.pop_prev)))
list(map(lambda x: _check_arg_len(x, n_pheno), ((args.intercept_h2, '--intercept-h2'),
(args.intercept_gencov, '--intercept-gencov'),
(args.samp_prev, '--samp-prev'),
(args.pop_prev, '--pop-prev'))))
if args.no_intercept:
args.intercept_h2 = [1 for _ in range(n_pheno)]
args.intercept_gencov = [0 for _ in range(n_pheno)]
p1 = rg_paths[0]
out_prefix = args.out + rg_files[0]
M_annot, w_ld_cname, ref_ld_cnames, sumstats, _ = _read_ld_sumstats(args, log, p1,
alleles=True, dropna=True)
RG = []
n_annot = M_annot.shape[1]
if n_annot == 1 and args.two_step is None and args.intercept_h2 is None:
args.two_step = 30
if args.two_step is not None:
log.log('Using two-step estimator with cutoff at {M}.'.format(M=args.two_step))
for i, p2 in enumerate(rg_paths[1:n_pheno]):
log.log(
'Computing rg for phenotype {I}/{N}'.format(I=i + 2, N=len(rg_paths)))
try:
loop = _read_other_sumstats(args, log, p2, sumstats, ref_ld_cnames)
rghat = _rg(loop, args, log, M_annot, ref_ld_cnames, w_ld_cname, i)
RG.append(rghat)
_print_gencor(args, log, rghat, ref_ld_cnames, i, rg_paths, i == 0)
out_prefix_loop = out_prefix + '_' + rg_files[i + 1]
if args.print_cov:
_print_rg_cov(rghat, out_prefix_loop, log)
if args.print_delete_vals:
_print_rg_delete_values(rghat, out_prefix_loop, log)
except Exception: # keep going if phenotype 50/100 causes an error
msg = 'ERROR computing rg for phenotype {I}/{N}, from file {F}.'
log.log(msg.format(I=i + 2, N=len(rg_paths), F=rg_paths[i + 1]))
ex_type, ex, tb = sys.exc_info()
            log.log(traceback.format_exc() + '\n')
if len(RG) <= i: # if exception raised before appending to RG
RG.append(None)
log.log('\nSummary of Genetic Correlation Results\n' +
_get_rg_table(rg_paths, RG, args))
return RG
def _read_other_sumstats(args, log, p2, sumstats, ref_ld_cnames):
loop = _read_sumstats(args, log, p2, alleles=True, dropna=False)
loop = _merge_sumstats_sumstats(args, sumstats, loop, log)
loop = loop.dropna(how='any')
alleles = loop.A1 + loop.A2 + loop.A1x + loop.A2x
if not args.no_check_alleles:
loop = _select_and_log(loop, _filter_alleles(alleles), log,
'{N} SNPs with valid alleles.')
loop['Z2'] = _align_alleles(loop.Z2, alleles)
loop = loop.drop(['A1', 'A1x', 'A2', 'A2x'], axis=1)
_check_ld_condnum(args, log, loop[ref_ld_cnames])
_warn_length(log, loop)
return loop
def _get_rg_table(rg_paths, RG, args):
'''Print a table of genetic correlations.'''
t = lambda attr: lambda obj: getattr(obj, attr, 'NA')
x = pd.DataFrame()
x['p1'] = [rg_paths[0] for i in range(1, len(rg_paths))]
x['p2'] = rg_paths[1:len(rg_paths)]
x['rg'] = list(map(t('rg_ratio'), RG))
x['se'] = list(map(t('rg_se'), RG))
x['z'] = list(map(t('z'), RG))
x['p'] = list(map(t('p'), RG))
if args.samp_prev is not None and args.pop_prev is not None and\
            all((i is not None for i in args.samp_prev)) and all((i is not None for i in args.pop_prev)):
c = reg.h2_obs_to_liab(1, args.samp_prev[1], args.pop_prev[1])
x['h2_liab'] = [c * x for x in list(map(t('tot'), list(map(t('hsq2'), RG))))]
x['h2_liab_se'] = [c * x for x in list(map(t('tot_se'), list(map(t('hsq2'), RG))))]
else:
x['h2_obs'] = list(map(t('tot'), list(map(t('hsq2'), RG))))
x['h2_obs_se'] = list(map(t('tot_se'), list(map(t('hsq2'), RG))))
x['h2_int'] = list(map(t('intercept'), list(map(t('hsq2'), RG))))
x['h2_int_se'] = list(map(t('intercept_se'), list(map(t('hsq2'), RG))))
x['gcov_int'] = list(map(t('intercept'), list(map(t('gencov'), RG))))
x['gcov_int_se'] = list(map(t('intercept_se'), list(map(t('gencov'), RG))))
return x.to_string(header=True, index=False) + '\n'
def _print_gencor(args, log, rghat, ref_ld_cnames, i, rg_paths, print_hsq1):
l = lambda x: x + ''.join(['-' for i in range(len(x.replace('\n', '')))])
P = [args.samp_prev[0], args.samp_prev[i + 1]]
K = [args.pop_prev[0], args.pop_prev[i + 1]]
if args.samp_prev is None and args.pop_prev is None:
args.samp_prev = [None, None]
args.pop_prev = [None, None]
if print_hsq1:
log.log(l('\nHeritability of phenotype 1\n'))
log.log(rghat.hsq1.summary(ref_ld_cnames, P=P[0], K=K[0]))
log.log(
l('\nHeritability of phenotype {I}/{N}\n'.format(I=i + 2, N=len(rg_paths))))
log.log(rghat.hsq2.summary(ref_ld_cnames, P=P[1], K=K[1]))
log.log(l('\nGenetic Covariance\n'))
log.log(rghat.gencov.summary(ref_ld_cnames, P=P, K=K))
log.log(l('\nGenetic Correlation\n'))
log.log(rghat.summary() + '\n')
def _merge_sumstats_sumstats(args, sumstats1, sumstats2, log):
'''Merge two sets of summary statistics.'''
sumstats1.rename(columns={'N': 'N1', 'Z': 'Z1'}, inplace=True)
sumstats2.rename(
columns={'A1': 'A1x', 'A2': 'A2x', 'N': 'N2', 'Z': 'Z2'}, inplace=True)
x = _merge_and_log(sumstats1, sumstats2, 'summary statistics', log)
return x
def _filter_alleles(alleles):
'''Remove bad variants (mismatched alleles, non-SNPs, strand ambiguous).'''
ii = alleles.apply(lambda y: y in MATCH_ALLELES)
return ii
def _align_alleles(z, alleles):
'''Align Z1 and Z2 to same choice of ref allele (allowing for strand flip).'''
try:
z *= (-1) ** alleles.apply(lambda y: FLIP_ALLELES[y])
except KeyError as e:
msg = 'Incompatible alleles in .sumstats files: %s. ' % e.args
msg += 'Did you forget to use --merge-alleles with munge_sumstats.py?'
raise KeyError(msg)
return z
def _rg(sumstats, args, log, M_annot, ref_ld_cnames, w_ld_cname, i):
'''Run the regressions.'''
n_snp = len(sumstats)
s = lambda x: np.array(x).reshape((n_snp, 1))
if args.chisq_max is not None:
ii = sumstats.Z1**2*sumstats.Z2**2 < args.chisq_max**2
n_snp = np.sum(ii) # lambdas are late binding, so this works
sumstats = sumstats[ii]
n_blocks = min(args.n_blocks, n_snp)
    ref_ld = sumstats[ref_ld_cnames].values
intercepts = [args.intercept_h2[0], args.intercept_h2[
i + 1], args.intercept_gencov[i + 1]]
rghat = reg.RG(s(sumstats.Z1), s(sumstats.Z2),
ref_ld, s(sumstats[w_ld_cname]), s(
sumstats.N1), s(sumstats.N2), M_annot,
intercept_hsq1=intercepts[0], intercept_hsq2=intercepts[1],
intercept_gencov=intercepts[2], n_blocks=n_blocks, twostep=args.two_step)
return rghat
def _parse_rg(rg):
'''Parse args.rg.'''
rg_paths = _splitp(rg)
rg_files = [x.split('/')[-1] for x in rg_paths]
if len(rg_paths) < 2:
raise ValueError(
'Must specify at least two phenotypes for rg estimation.')
return rg_paths, rg_files
def _print_rg_delete_values(rg, fh, log):
'''Print block jackknife delete values.'''
_print_delete_values(rg.hsq1, fh + '.hsq1.delete', log)
_print_delete_values(rg.hsq2, fh + '.hsq2.delete', log)
_print_delete_values(rg.gencov, fh + '.gencov.delete', log)
def _print_rg_cov(rghat, fh, log):
'''Print covariance matrix of estimates.'''
_print_cov(rghat.hsq1, fh + '.hsq1.cov', log)
_print_cov(rghat.hsq2, fh + '.hsq2.cov', log)
_print_cov(rghat.gencov, fh + '.gencov.cov', log)
def _split_or_none(x, n):
if x is not None:
y = list(map(float, x.replace('N', '-').split(',')))
else:
y = [None for _ in range(n)]
return y
def _check_arg_len(x, n):
x, m = x
if len(x) != n:
raise ValueError(
'{M} must have the same number of arguments as --rg/--h2.'.format(M=m))
```
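A small illustration (a sketch, not from the repository) of the `smart_merge` fallback above: when the two tables are not already row-aligned on the same SNPs, it performs an inner merge on the `SNP` column. The import path assumes the polyfun repository root is on `sys.path` with its dependencies installed.
```python
import pandas as pd

from ldsc_polyfun.sumstats import smart_merge  # assumed import path

x = pd.DataFrame({'SNP': ['rs1', 'rs2', 'rs3'], 'L2': [1.0, 2.0, 3.0]})
y = pd.DataFrame({'SNP': ['rs2', 'rs3', 'rs4'], 'Z': [0.5, -1.2, 2.2]})

merged = smart_merge(x, y)  # rows are not aligned, so only rs2 and rs3 survive the inner merge
print(merged)
```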
#### File: polyfun/ldstore/parse.py
```python
import numpy as np
def areSNPsIncluded( snps, n_snps, fname ):
for snp in snps:
if snp >= n_snps:
print('Cannot read dosages for ' + str( snp ) + 'th SNP. File "' + fname + '" contains only ' + str( n_snps ) + ' SNPs!')
return
def convertIntToFloat( x, n_bytes ):
if type( x ) is np.ndarray:
return convertIntToFloat_array( x, n_bytes )
else:
return convertIntToFloat_scalar( x, n_bytes )
def convertIntToFloat_array( x, n_bytes ):
int_na = getIntNA( n_bytes )
y = np.zeros( len( x ) )
y[ x == int_na ] = np.nan
y[ x != int_na ] = np.ldexp( x[ x != int_na ], -1 * ( 8 * n_bytes - 2 ) )
return y
def convertIntToFloat_scalar( x, n_bytes ):
if x == getIntNA( n_bytes ):
return np.nan
else:
return np.ldexp( x, -1 * ( 8 * n_bytes - 2 ) )
def getIntNA( n_bytes ):
if n_bytes == 2:
return 53248
elif n_bytes == 4:
return 3489660928
elif n_bytes == 8:
return 14987979559889010688
elif n_bytes == 1:
return 208
else:
print('Only 1, 2, 4 and 8 bytes are supported!')
return
```
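A quick sketch of the fixed-point decoding above (the import path is an assumption): an n-byte dosage is stored as `value * 2**(8*n - 2)`, and the per-width NA codes map to `nan`.
```python
import numpy as np

from ldstore.parse import convertIntToFloat, getIntNA  # assumed import path

raw = np.array([0, 8192, 16384, getIntNA(2)])  # 2-byte fixed point: 8192 -> 0.5, 16384 -> 1.0
print(convertIntToFloat(raw, 2))               # -> [0.  0.5 1.  nan]
```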
|
{
"source": "jerome-f/susieR",
"score": 2
}
|
#### File: inst/code/monitor_memory.py
```python
import sys
import time
import psutil
import subprocess
class ProcessTimer:
def __init__(self, command, interval = 1):
self.command = command
self.execution_state = False
self.interval = interval
def execute(self):
self.max_vms_memory = 0
self.max_rss_memory = 0
self.t0 = time.time()
self.t1 = None
self.max_t = [self.t0]
try:
self.p = subprocess.Popen(self.command, shell=False)
except FileNotFoundError:
self.p = None
sys.exit("Invalid command `{}`".format(sys.argv[1]))
self.execution_state = True
def poll(self):
if not self.check_execution_state():
return False
self.t1 = time.time()
try:
pp = psutil.Process(self.p.pid)
# Obtain a list of the subprocess and all its descendants.
descendants = list(pp.children(recursive=True))
descendants = descendants + [pp]
rss_memory = 0
vms_memory = 0
# Calculate and sum up the memory of the subprocess and all its
# descendants.
for descendant in descendants:
try:
mem_info = descendant.memory_info()
rss_memory += mem_info[0]
vms_memory += mem_info[1]
except (psutil.NoSuchProcess, psutil.ZombieProcess, psutil.AccessDenied):
# Sometimes a subprocess descendant will have terminated
# between the time we obtain a list of descendants, and the
# time we actually poll this descendant's memory usage.
pass
if int(self.max_vms_memory * 1E-8) < int(vms_memory * 1E-8):
# Peak memory updated, at ~100-MB resolution.
self.max_t = [self.t1]
if int(self.max_vms_memory * 1E-8) == int(vms_memory * 1E-8):
# Peak memory maintained.
self.max_t = [self.max_t[0], self.t1]
self.max_vms_memory = max(self.max_vms_memory,vms_memory)
self.max_rss_memory = max(self.max_rss_memory,rss_memory)
except (psutil.NoSuchProcess, psutil.ZombieProcess, psutil.AccessDenied):
return self.check_execution_state()
return self.check_execution_state()
def is_running(self):
return psutil.pid_exists(self.p.pid) and self.p.poll() == None
def check_execution_state(self):
if not self.execution_state:
return False
if self.is_running():
return True
        self.execution_state = False
self.t1 = time.time()
return False
def close(self,kill=False):
if self.p is not None:
try:
pp = psutil.Process(self.p.pid)
if kill:
pp.kill()
else:
pp.terminate()
except (psutil.NoSuchProcess, psutil.ZombieProcess, psutil.AccessDenied):
pass
def takewhile_excluding(iterable, value = ['|', '<', '>']):
for it in iterable:
if it in value:
return
yield it
if __name__ == '__main__':
import sys, os
if len(sys.argv) <= 1:
sys.exit()
interval = float(os.environ['MEM_CHECK_INTERVAL']) if 'MEM_CHECK_INTERVAL' in os.environ else 1
ptimer = ProcessTimer(takewhile_excluding(sys.argv[1:]), interval)
try:
ptimer.execute()
# Poll as often as possible; otherwise the subprocess might
# "sneak" in some extra memory usage while you aren't looking.
while ptimer.poll():
time.sleep(ptimer.interval)
finally:
# Make sure that we don't leave the process dangling.
ptimer.close()
sys.stderr.write('\ntime elapsed: {:.2f}s\n'.format(max(0, ptimer.t1 - ptimer.t0 - ptimer.interval * 0.5)))
sys.stderr.write('peak first occurred: {:.2f}s\n'.format(min(ptimer.max_t) - ptimer.t0))
sys.stderr.write('peak last occurred: {:.2f}s\n'.format(max(ptimer.max_t) - ptimer.t0))
sys.stderr.write('max vms_memory: {:.2f}GB\n'.format(ptimer.max_vms_memory * 1.07E-9))
sys.stderr.write('max rss_memory: {:.2f}GB\n'.format(ptimer.max_rss_memory * 1.07E-9))
sys.stderr.write('memory check interval: %ss\n' % ptimer.interval)
sys.stderr.write('return code: %s\n' % ptimer.p.returncode)
```
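A minimal sketch of driving `ProcessTimer` directly from Python (the script is normally run as `python monitor_memory.py <command> [args...]`, optionally with `MEM_CHECK_INTERVAL` set); the `sleep` command and the import path are assumptions.
```python
import time

from monitor_memory import ProcessTimer  # assumes inst/code is on the import path

ptimer = ProcessTimer(['sleep', '2'], interval=0.5)
ptimer.execute()
while ptimer.poll():             # poll frequently so short-lived peaks are not missed
    time.sleep(ptimer.interval)
ptimer.close()                   # make sure the child process is not left dangling
print('elapsed: {:.2f}s, peak RSS: {:.2f} MB'.format(
    ptimer.t1 - ptimer.t0, ptimer.max_rss_memory / 1e6))
```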
|