examples/scripts/flopy_lake_example.py | andrewcalderwood/flopy | 351 | 10400 |
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import flopy
def run():
workspace = os.path.join("lake")
# make sure workspace directory exists
if not os.path.exists(workspace):
os.makedirs(workspace)
fext = "png"
narg = len(sys.argv)
iarg = 0
if narg > 1:
while iarg < narg - 1:
iarg += 1
basearg = sys.argv[iarg].lower()
if basearg == "--pdf":
fext = "pdf"
# save the starting path
cwdpth = os.getcwd()
# change to the working directory
os.chdir(workspace)
# We are creating a square model with a specified head equal to `h1` along all boundaries.
# The head at the cell in the center in the top layer is fixed to `h2`. First, set the name
# of the model and the parameters of the model: the number of layers `Nlay`, the number of rows
# and columns `N`, lengths of the sides of the model `L`, aquifer thickness `H`, hydraulic
# conductivity `Kh`
name = "lake_example"
h1 = 100
h2 = 90
Nlay = 10
N = 101
L = 400.0
H = 50.0
Kh = 1.0
# Create a MODFLOW model and store it (in this case in the variable `ml`, but you can call it
# whatever you want). The modelname will be the name given to all MODFLOW files (input and output).
# The exe_name should be the full path to your MODFLOW executable, or just its name if it is on the PATH. The version is either 'mf2k'
# for MODFLOW-2000 or 'mf2005' for MODFLOW-2005.
ml = flopy.modflow.Modflow(
modelname=name, exe_name="mf2005", version="mf2005"
)
# Define the discretization of the model. All layers are given equal thickness. The `bot` array
# is built from `H` and `Nlay` to give the bottom elevation of each layer, and `delrow` and
# `delcol` are computed from model size `L` and number of cells `N`. Once these are all computed,
# the Discretization file is built.
bot = np.linspace(-H / Nlay, -H, Nlay)
delrow = delcol = L / (N - 1)
dis = flopy.modflow.ModflowDis(
ml,
nlay=Nlay,
nrow=N,
ncol=N,
delr=delrow,
delc=delcol,
top=0.0,
botm=bot,
laycbd=0,
)
# Next we specify the boundary conditions and starting heads with the Basic package. The `ibound`
# array will be `1` in all cells in all layers, except for along the boundary and in the cell at
# the center in the top layer where it is set to `-1` to indicate fixed heads. The starting heads
# are used to define the heads in the fixed head cells (this is a steady simulation, so none of
# the other starting values matter). So we set the starting heads to `h1` everywhere, except for
# the head at the center of the model in the top layer.
Nhalf = int((N - 1) / 2)
ibound = np.ones((Nlay, N, N), dtype=int)
ibound[:, 0, :] = -1
ibound[:, -1, :] = -1
ibound[:, :, 0] = -1
ibound[:, :, -1] = -1
ibound[0, Nhalf, Nhalf] = -1
start = h1 * np.ones((N, N))
start[Nhalf, Nhalf] = h2
# create external ibound array and starting head files
files = []
hfile = f"{name}_strt.ref"
np.savetxt(hfile, start)
hfiles = []
for kdx in range(Nlay):
file = f"{name}_ib{kdx + 1:02d}.ref"
files.append(file)
hfiles.append(hfile)
np.savetxt(file, ibound[kdx, :, :], fmt="%5d")
bas = flopy.modflow.ModflowBas(ml, ibound=files, strt=hfiles)
# The aquifer properties (really only the hydraulic conductivity) are defined with the
# LPF package.
lpf = flopy.modflow.ModflowLpf(ml, hk=Kh)
# Finally, we need to specify the solver we want to use (PCG with default values), and the
# output control (using the default values). Then we are ready to write all MODFLOW input
# files and run MODFLOW.
pcg = flopy.modflow.ModflowPcg(ml)
oc = flopy.modflow.ModflowOc(ml)
ml.write_input()
ml.run_model()
# change back to the starting directory
os.chdir(cwdpth)
# Once the model has terminated normally, we can read the heads file. First, a link to the heads
# file is created with `HeadFile`. The link can then be accessed with the `get_data` function, by
# specifying, in this case, the step number and period number for which we want to retrieve data.
# A three-dimensional array is returned of size `nlay, nrow, ncol`. Matplotlib contouring functions
# are used to make contours of the layers or a cross-section.
hds = flopy.utils.HeadFile(os.path.join(workspace, f"{name}.hds"))
h = hds.get_data(kstpkper=(0, 0))
x = y = np.linspace(0, L, N)
c = plt.contour(x, y, h[0], np.arange(90, 100.1, 0.2))
plt.clabel(c, fmt="%2.1f")
plt.axis("scaled")
outfig = os.path.join(workspace, f"lake1.{fext}")
fig = plt.gcf()
fig.savefig(outfig, dpi=300)
print("created...", outfig)
x = y = np.linspace(0, L, N)
c = plt.contour(x, y, h[-1], np.arange(90, 100.1, 0.2))
plt.clabel(c, fmt="%1.1f")
plt.axis("scaled")
outfig = os.path.join(workspace, f"lake2.{fext}")
fig = plt.gcf()
fig.savefig(outfig, dpi=300)
print("created...", outfig)
z = np.linspace(-H / Nlay / 2, -H + H / Nlay / 2, Nlay)
c = plt.contour(x, z, h[:, 50, :], np.arange(90, 100.1, 0.2))
plt.axis("scaled")
outfig = os.path.join(workspace, f"lake3.{fext}")
fig = plt.gcf()
fig.savefig(outfig, dpi=300)
print("created...", outfig)
return 0
if __name__ == "__main__":
success = run()
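A minimal way to exercise the example above, assuming the `mf2005` executable is installed and discoverable on the PATH (that location is an assumption, not something the script verifies):

    # run as a script: writes the lake1/lake2/lake3 figures under ./lake
    #   python flopy_lake_example.py
    #   python flopy_lake_example.py --pdf   # save the figures as PDF instead of PNG
    # or import it and call run() directly
    import flopy_lake_example
    flopy_lake_example.run()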
P2/Caso2/clustering.py | Ocete/Inteligenica-de-Negocio | 0 | 10401 |
# -*- coding: utf-8 -*-
'''
Documentation on clustering in Python:
http://scikit-learn.org/stable/modules/clustering.html
http://www.learndatasci.com/k-means-clustering-algorithms-python-intro/
http://hdbscan.readthedocs.io/en/latest/comparing_clustering_algorithms.html
https://joernhees.de/blog/2015/08/26/scipy-hierarchical-clustering-and-dendrogram-tutorial/
http://www.learndatasci.com/k-means-clustering-algorithms-python-intro/
'''
import time
import csv
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn import metrics
from sklearn import cluster
from math import floor
import seaborn as sns
# Nice-looking plot defaults
sns.set()
def norm_to_zero_one(df):
return (df - df.min()) * 1.0 / (df.max() - df.min())
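# A quick sanity check of the scaler above (a sketch, not part of the original script):
#   norm_to_zero_one(pd.DataFrame({'x': [2, 4, 6]}))
# rescales every column to [0, 1], e.g. x -> [0.0, 0.5, 1.0]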
censo = pd.read_csv('../mujeres_fecundidad_INE_2018.csv')
'''
for col in censo:
missing_count = sum(pd.isnull(censo[col]))
if missing_count > 0:
print(col,missing_count)
#'''
#Unknown values could also be replaced with a fixed number:
#censo = censo.replace(np.NaN,0)
# Instead, fill missing values with the column mean
for col in censo:
censo[col].fillna(censo[col].mean(), inplace=True)
#Select cases
subset = censo.loc[(censo['TRAREPRO']==1) & (censo['NEMBTRAREPRO']<=6)]
# Select variables
usadas = ['NHIJOS', 'TIPOTRAREPRO', 'NMESESTRAREPRO', 'NEMBTRAREPRO']
X = subset[usadas]
X_normal = X.apply(norm_to_zero_one)
print('Tamaño de la población tras filtrado: ',len(X_normal.index))
for col in X:
missing_count = sum(pd.isnull(censo[col]))
if missing_count > 0:
print(col,missing_count, ' AFTER')
algoritmos = (('KMeans', cluster.KMeans(init='k-means++', n_clusters=5, n_init=5)),
('MeanShift', cluster.MeanShift(cluster_all=False, min_bin_freq=3)),
('Ward', cluster.AgglomerativeClustering(n_clusters=4, linkage='ward')),
('DBScan', cluster.DBSCAN(eps=0.35, min_samples=5)),
('Birch', cluster.Birch(threshold=0.1,n_clusters=5)))
cluster_predict = {}
calinski = {}
silh = {}
times = {}
n_clusters = {}
clusters_fig, clusters_axis = plt.subplots(3, 2, figsize=(10,10))
clusters_colors = ['gold', 'yellowgreen', 'lightcoral', 'lightskyblue', '#ffb347']
ijs = [(0,0), (0,1), (1,0), (1,1), (2,0), (2,1)]
for i_alg, par in enumerate(algoritmos):
name, alg = par
print('----- Ejecutando ' + name,)
t = time.time()
cluster_predict[name] = alg.fit_predict(X_normal)
tiempo = time.time() - t
times[name] = tiempo
metric_CH = metrics.calinski_harabasz_score(X_normal, cluster_predict[name])
calinski[name] = metric_CH
metric_SC = metrics.silhouette_score(X_normal, cluster_predict[name], metric='euclidean', sample_size=floor(len(X)), random_state=123456)
silh[name] = metric_SC
# Store the cluster assignment in a DataFrame
clusters = pd.DataFrame(cluster_predict[name],index=X.index,columns=['cluster'])
if (name == 'KMeans'):
clusters_kmeans = clusters
alg_kmeans = alg
elif (name == 'Ward'):
clusters_ward = clusters
print("Tamaño de cada cluster:")
size = clusters['cluster'].value_counts()
cluster_fractions = []
for num,i in size.iteritems():
print('%s: %5d (%5.2f%%)' % (num,i,100*i/len(clusters)))
cluster_fractions.append( 100*i/len(clusters) )
n_clusters[name] = len(size)
# Bar charts
if ( len(cluster_fractions) > 7 ):
cluster_fractions = cluster_fractions[0:6]
i, j = ijs[i_alg]
y_pos = np.arange(len(cluster_fractions))
labels = [ "Cluster " + str(i) for i in range(len(cluster_fractions)) ]
clusters_axis[i, j].bar(y_pos, cluster_fractions, tick_label=labels, color=clusters_colors)
clusters_axis[i, j].set_ylim(0, 100)
clusters_axis[i, j].set_title(name)
if (j == 0):
clusters_axis[i, j].set_ylabel("Cluster size (%)")
clusters_axis[2,1].remove()
#clusters_fig.savefig("clusters.png")
plt.show()
from prettytable import PrettyTable
header = ['Algoritmo', 'CH', 'Silh', 'Tiempo', 'Número de clusters']
tabla = PrettyTable(header)
for name, alg in algoritmos:
tabla.add_row([name,
"{0:.2f}".format(calinski[name]),
"{0:.2f}".format(silh[name]),
"{0:.2f}".format(times[name]),
n_clusters[name]])
print(tabla)
# Write the results to general.csv
'''
with open('general.csv', mode='w+', newline='') as file:
writer = csv.DictWriter(file, fieldnames=header)
writer.writeheader()
for name, _ in algoritmos:
writer.writerow({'Algoritmo': name,
'CH': "{0:.2f}".format(calinski[name]),
'Silh': "{0:.2f}".format(silh[name]),
'Tiempo': "{0:.2f}".format(times[name]),
'Número de clusters': n_clusters[name]})
#'''
# ----------------------- DISTRIBUTION FUNCTIONS ---------
print("---------- Preparando funciones de distribución...")
n_clusters_ward = n_clusters['Ward']
n_var = len(usadas)
X_ward = pd.concat([X, clusters_ward], axis=1)
fig, axes = plt.subplots(n_clusters_ward, n_var, sharey=True, figsize=(15,15))
fig.subplots_adjust(wspace=0, hspace=0)
colors = sns.color_palette(palette=None, n_colors=n_clusters_ward, desat=None)
rango = []
for j in range(n_var):
rango.append([X_ward[usadas[j]].min(), X_ward[usadas[j]].max()])
for i in range(n_clusters_ward):
dat_filt = X_ward.loc[X_ward['cluster']==i]
for j in range(n_var):
#ax = sns.kdeplot(dat_filt[usadas[j]], label="", shade=True, color=colors[i], ax=axes[i,j])
ax = sns.boxplot(dat_filt[usadas[j]], color=colors[i], flierprops={'marker':'o','markersize':4}, ax=axes[i,j])
if (i==n_clusters_ward-1):
axes[i,j].set_xlabel(usadas[j])
else:
axes[i,j].set_xlabel("")
if (j==0):
axes[i,j].set_ylabel("Cluster "+str(i))
else:
axes[i,j].set_ylabel("")
axes[i,j].set_yticks([])
axes[i,j].grid(axis='x', linestyle='-', linewidth='0.2', color='gray')
axes[i,j].grid(axis='y', b=False)
ax.set_xlim(rango[j][0]-0.05*(rango[j][1]-rango[j][0]),rango[j][1]+0.05*(rango[j][1]-rango[j][0]))
plt.show()
#fig.savefig("boxes.png")
# ---------------- SCATTER MATRIX -----------------------
'''
plt.clf()
print("---------- Preparando el scatter matrix...")
# The cluster assignment is added to X as a column
variables = list(X_ward)
variables.remove('cluster')
sns_plot = sns.pairplot(X_ward, vars=variables, hue="cluster", palette='Paired', plot_kws={"s": 25}, diag_kind="hist")
sns_plot.fig.subplots_adjust(wspace=.03, hspace=.03);
# sns_plot.savefig("scatter_matrix.png")
plt.show()
#'''
# ----------------------- DENDROGRAMS -----------------------
#For clustering, normalize so the distance metrics are comparable
# X_normal = preprocessing.normalize(X, norm='l2')
X_normal = (X - X.min() ) / (X.max() - X.min())
#Use this hierarchical clustering and keep 20 clusters, i.e. twenty branches of the dendrogram
ward = cluster.AgglomerativeClustering(n_clusters=20, linkage='ward')
name, algorithm = ('Ward', ward)
cluster_predict = {}
k = {}
t = time.time()
cluster_predict[name] = algorithm.fit_predict(X_normal)
tiempo = time.time() - t
k[name] = len(set(cluster_predict[name]))
# Convert the cluster assignment to a DataFrame
clusters = pd.DataFrame(cluster_predict['Ward'],index=X.index,columns=['cluster'])
# And append it to X as a column
X_cluster = pd.concat([X, clusters], axis=1)
# Filter out the elements (outliers) that fall into very small clusters of the hierarchical step
min_size = 3
X_filtrado = X
'''
X_cluster[X_cluster.groupby('cluster').cluster.transform(len) > min_size]
k_filtrado = len(set(X_filtrado['cluster']))
print("De los {:.0f} clusters hay {:.0f} con más de {:.0f} elementos. Del total de {:.0f} elementos, se seleccionan {:.0f}".format(k['Ward'],k_filtrado,min_size,len(X),len(X_filtrado)))
X_filtrado = X_filtrado.drop('cluster', 1)
X_filtrado = X
#'''
#Normalize the filtered set
X_filtrado_normal = preprocessing.normalize(X_filtrado, norm='l2')
# Build the dendrogram with scipy, which actually re-runs the hierarchical clustering
from scipy.cluster import hierarchy
linkage_array = hierarchy.ward(X_filtrado_normal)
plt.clf()
dendro = hierarchy.dendrogram(linkage_array,orientation='left', p=10, truncate_mode='lastp') #horizontal orientation, to compare against the seaborn version
# "p=10, truncate_mode='lastp'" cuts the dendrogram down to 10 leaves
# Dendrogram with seaborn (which uses scipy underneath) so a heatmap can be included
X_filtrado_normal_DF = pd.DataFrame(X_filtrado_normal, index=X_filtrado.index, columns=usadas)
# Add a label column indicating the cluster each object belongs to
labels = X_ward['cluster']
lut = dict(zip(set(labels), sns.color_palette(palette="Blues_d", n_colors=n_clusters_ward)))
row_colors = pd.DataFrame(labels)['cluster'].map(lut)
clustergrid = sns.clustermap(X_filtrado_normal_DF, method='ward', row_colors=row_colors, col_cluster=False, figsize=(20,10), cmap="YlGnBu", yticklabels=False)
# Re-attach the reordered labels. Right now the colors do not land in the
# column where they should; presumably the ids do not line up.
#'''
ordering = clustergrid.dendrogram_row.reordered_ind
labels_list = [x for _, x in sorted(zip(ordering,labels), key=lambda pair: pair[0])]
labels = pd.Series(labels_list, index=X_filtrado_normal_DF.index, name='cluster')
lut = dict(zip(set(labels), sns.color_palette(palette="Blues_d", n_colors=n_clusters_ward)))
row_colors = pd.DataFrame(labels)['cluster'].map(lut)
clustergrid = sns.clustermap(X_filtrado_normal_DF, method='ward', row_colors=row_colors, col_cluster=False, figsize=(20,10), cmap="YlGnBu", yticklabels=False)
#'''
#plt.savefig("dendograma.png")
# ----------------------- HEATMAPS -----------------------
#'''
plt.figure(1)
centers = pd.DataFrame(alg_kmeans.cluster_centers_, columns=list(X))
centers_desnormal = centers.copy()
centers_desnormal = centers.drop([4])
# Compute the centroids
X = pd.concat([X, clusters_ward], axis=1)
for variable in list(centers):
for k_cluster in range(n_clusters_ward):
centroide = X.loc[(clusters_ward['cluster']==k_cluster)][variable].mean()
centers_desnormal.loc[k_cluster, variable] = centroide
# Normalize
centers_normal2 = centers_desnormal.copy()
centers_normal2 = (centers_normal2 - centers_normal2.min() ) / (centers_normal2.max() - centers_normal2.min())
import matplotlib.pyplot as plt
heatmap_fig, ax = plt.subplots(figsize=(10,10))
heatmap = sns.heatmap(centers_normal2, cmap="YlGnBu", annot=centers_desnormal, fmt='.3f')
# Keep the top and bottom rows from being cut in half
bottom, top = ax.get_ylim()
ax.set_ylim(bottom + 0.5, top - 0.5)
#heatmap_fig.savefig("heatmap.png")
#'''
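A rough sketch of how this script is launched; the CSV location comes from the `pd.read_csv` call at the top, everything else here is an assumption:

    # from P2/Caso2/, with mujeres_fecundidad_INE_2018.csv one directory up:
    #   python clustering.py
    # it prints the cluster sizes and the metrics table, then shows the bar charts,
    # box plots, dendrograms and the centroid heatmap built above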
signal_processing/ecg_preproc.py | DeepPSP/cpsc2020 | 1 | 10402 |
"""
preprocess of (single lead) ecg signal:
band pass --> remove baseline --> find rpeaks --> denoise (mainly deal with motion artefact)
TODO:
1. motion artefact detection,
and slice the signal into continuous (no motion artefact within) segments
2. to add
References:
-----------
[1] https://github.com/PIA-Group/BioSPPy
[2] to add
"""
import os, time
import multiprocessing as mp
from copy import deepcopy
from numbers import Real
from typing import Union, Optional, Any, List, Dict
import numpy as np
from easydict import EasyDict as ED
from scipy.ndimage.filters import median_filter
from scipy.signal.signaltools import resample
from scipy.io import savemat
# from scipy.signal import medfilt
# https://github.com/scipy/scipy/issues/9680
try:
from biosppy.signals.tools import filter_signal
except ImportError:
from references.biosppy.biosppy.signals.tools import filter_signal
from cfg import PreprocCfg
from .ecg_rpeaks import (
xqrs_detect, gqrs_detect, pantompkins,
hamilton_detect, ssf_detect, christov_detect, engzee_detect, gamboa_detect,
)
from .ecg_rpeaks_dl import seq_lab_net_detect
__all__ = [
"preprocess_signal",
"parallel_preprocess_signal",
"denoise_signal",
]
QRS_DETECTORS = {
"xqrs": xqrs_detect,
"gqrs": gqrs_detect,
"pantompkins": pantompkins,
"hamilton": hamilton_detect,
"ssf": ssf_detect,
"christov": christov_detect,
"engzee": engzee_detect,
"gamboa": gamboa_detect,
"seq_lab": seq_lab_net_detect,
}
DL_QRS_DETECTORS = [
"seq_lab",
]
def preprocess_signal(raw_sig:np.ndarray, fs:Real, config:Optional[ED]=None) -> Dict[str, np.ndarray]:
""" finished, checked,
Parameters:
-----------
raw_sig: ndarray,
the raw ecg signal
fs: real number,
sampling frequency of `raw_sig`
config: dict, optional,
extra process configuration,
`PreprocCfg` will be updated by this `config`
Returns:
--------
retval: dict,
with items
- 'filtered_ecg': the array of the processed ecg signal
- 'rpeaks': the array of indices of rpeaks; empty if 'rpeaks' in `config` is not set
NOTE:
-----
output (`retval`) are resampled to have sampling frequency
equal to `config.fs` (if `config` has item `fs`) or `PreprocCfg.fs`
"""
filtered_ecg = raw_sig.copy()
cfg = deepcopy(PreprocCfg)
cfg.update(config or {})
if fs != cfg.fs:
filtered_ecg = resample(filtered_ecg, int(round(len(filtered_ecg)*cfg.fs/fs)))
# remove baseline
if 'baseline' in cfg.preproc:
window1 = 2 * (cfg.baseline_window1 // 2) + 1 # window size must be odd
window2 = 2 * (cfg.baseline_window2 // 2) + 1
baseline = median_filter(filtered_ecg, size=window1, mode='nearest')
baseline = median_filter(baseline, size=window2, mode='nearest')
filtered_ecg = filtered_ecg - baseline
# filter signal
if 'bandpass' in cfg.preproc:
filtered_ecg = filter_signal(
signal=filtered_ecg,
ftype='FIR',
band='bandpass',
order=int(0.3 * fs),
sampling_rate=fs,
frequency=cfg.filter_band,
)['signal']
if cfg.rpeaks and cfg.rpeaks.lower() not in DL_QRS_DETECTORS:
# dl detectors not for parallel computing using `mp`
detector = QRS_DETECTORS[cfg.rpeaks.lower()]
rpeaks = detector(sig=filtered_ecg, fs=fs).astype(int)
else:
rpeaks = np.array([], dtype=int)
retval = ED({
"filtered_ecg": filtered_ecg,
"rpeaks": rpeaks,
})
return retval
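# A minimal sketch of calling `preprocess_signal` on a synthetic trace
# (400 Hz and the 'xqrs' detector are example choices, not defaults mandated here):
#
#   import numpy as np
#   from easydict import EasyDict as ED
#   sig = np.random.randn(400 * 60)  # one minute of fake 400 Hz data
#   out = preprocess_signal(sig, fs=400, config=ED({"rpeaks": "xqrs"}))
#   out["filtered_ecg"], out["rpeaks"]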
def parallel_preprocess_signal(raw_sig:np.ndarray, fs:Real, config:Optional[ED]=None, save_dir:Optional[str]=None, save_fmt:str='npy', verbose:int=0) -> Dict[str, np.ndarray]:
""" finished, checked,
Parameters:
-----------
raw_sig: ndarray,
the raw ecg signal
fs: real number,
sampling frequency of `raw_sig`
config: dict, optional,
extra process configuration,
`PreprocCfg` will `update` this `config`
save_dir: str, optional,
directory for saving the outcome ('filtered_ecg' and 'rpeaks')
save_fmt: str, default 'npy',
format of the save files, 'npy' or 'mat'
Returns:
--------
retval: dict,
with items
- 'filtered_ecg': the array of the processed ecg signal
- 'rpeaks': the array of indices of rpeaks; empty if 'rpeaks' in `config` is not set
NOTE:
-----
output (`retval`) are resampled to have sampling frequency
equal to `config.fs` (if `config` has item `fs`) or `PreprocCfg.fs`
"""
start_time = time.time()
cfg = deepcopy(PreprocCfg)
cfg.update(config or {})
epoch_len = int(cfg.parallel_epoch_len * fs)
epoch_overlap_half = int(cfg.parallel_epoch_overlap * fs) // 2
epoch_overlap = 2 * epoch_overlap_half
epoch_forward = epoch_len - epoch_overlap
if len(raw_sig) <= 3 * epoch_len: # too short, no need for parallel computing
retval = preprocess_signal(raw_sig, fs, cfg)
if cfg.rpeaks and cfg.rpeaks.lower() in DL_QRS_DETECTORS:
rpeaks = QRS_DETECTORS[cfg.rpeaks.lower()](sig=raw_sig, fs=fs, verbose=verbose).astype(int)
retval.rpeaks = rpeaks
return retval
l_epoch = [
raw_sig[idx*epoch_forward: idx*epoch_forward + epoch_len] \
for idx in range((len(raw_sig)-epoch_overlap)//epoch_forward)
]
if cfg.parallel_keep_tail:
tail_start_idx = epoch_forward * len(l_epoch) + epoch_overlap
if len(raw_sig) - tail_start_idx < 30 * fs: # less than 30s, make configurable?
# append to the last epoch
l_epoch[-1] = np.append(l_epoch[-1], raw_sig[tail_start_idx:])
else: # long enough
tail_epoch = raw_sig[tail_start_idx-epoch_overlap:]
l_epoch.append(tail_epoch)
cpu_num = max(1, mp.cpu_count()-3)
with mp.Pool(processes=cpu_num) as pool:
result = pool.starmap(
func=preprocess_signal,
iterable=[(e, fs, cfg) for e in l_epoch],
)
if cfg.parallel_keep_tail:
tail_result = result[-1]
result = result[:-1]
filtered_ecg = result[0]['filtered_ecg'][:epoch_len-epoch_overlap_half]
rpeaks = result[0]['rpeaks'][np.where(result[0]['rpeaks']<epoch_len-epoch_overlap_half)[0]]
for idx, e in enumerate(result[1:]):
filtered_ecg = np.append(
filtered_ecg, e['filtered_ecg'][epoch_overlap_half: -epoch_overlap_half]
)
epoch_rpeaks = e['rpeaks'][np.where( (e['rpeaks'] >= epoch_overlap_half) & (e['rpeaks'] < epoch_len-epoch_overlap_half) )[0]]
rpeaks = np.append(rpeaks, (idx+1)*epoch_forward + epoch_rpeaks)
if cfg.parallel_keep_tail:
filtered_ecg = np.append(filtered_ecg, tail_result['filtered_ecg'][epoch_overlap_half:])
tail_rpeaks = tail_result['rpeaks'][np.where(tail_result['rpeaks'] >= epoch_overlap_half)[0]]
rpeaks = np.append(rpeaks, len(result)*epoch_forward + tail_rpeaks)
if verbose >= 1:
if cfg.rpeaks.lower() in DL_QRS_DETECTORS:
print(f"signal processing took {round(time.time()-start_time, 3)} seconds")
else:
print(f"signal processing and R peaks detection took {round(time.time()-start_time, 3)} seconds")
start_time = time.time()
if cfg.rpeaks and cfg.rpeaks.lower() in DL_QRS_DETECTORS:
rpeaks = QRS_DETECTORS[cfg.rpeaks.lower()](sig=raw_sig, fs=fs, verbose=verbose).astype(int)
if verbose >= 1:
print(f"R peaks detection using {cfg.rpeaks} took {round(time.time()-start_time, 3)} seconds")
if save_dir:
# NOTE: this part is not tested
os.makedirs(save_dir, exist_ok=True)
if save_fmt.lower() == 'npy':
np.save(os.path.join(save_dir, "filtered_ecg.npy"), filtered_ecg)
np.save(os.path.join(save_dir, "rpeaks.npy"), rpeaks)
elif save_fmt.lower() == 'mat':
# save into 2 files, keep in accordance
savemat(os.path.join(save_dir, "filtered_ecg.mat"), {"filtered_ecg": filtered_ecg}, format='5')
savemat(os.path.join(save_dir, "rpeaks.mat"), {"rpeaks": rpeaks}, format='5')
retval = ED({
"filtered_ecg": filtered_ecg,
"rpeaks": rpeaks,
})
return retval
"""
to check correctness of the function `parallel_preprocess_signal`,
say for record A01, one can call
>>> raw_sig = loadmat("./data/A01.mat")['ecg'].flatten()
>>> processed = parallel_preprocess_signal(raw_sig, 400)
>>> print(len(processed['filtered_ecg']) - len(raw_sig))
>>> start_t = int(3600*24.7811)
>>> len_t = 10
>>> fig, ax = plt.subplots(figsize=(20,6))
>>> ax.plot(processed['filtered_ecg'][start_t*400:(start_t+len_t)*400])
>>> for r in [p for p in processed['rpeaks'] if start_t*400 <= p < (start_t+len_t)*400]:
>>> ax.axvline(r-start_t*400,c='red',linestyle='dashed')
>>> plt.show()
or one can use the 'dataset.py'
"""
| """
preprocess of (single lead) ecg signal:
band pass --> remove baseline --> find rpeaks --> denoise (mainly deal with motion artefact)
TODO:
1. motion artefact detection,
and slice the signal into continuous (no motion artefact within) segments
2. to add
References:
-----------
[1] https://github.com/PIA-Group/BioSPPy
[2] to add
"""
import os, time
import multiprocessing as mp
from copy import deepcopy
from numbers import Real
from typing import Union, Optional, Any, List, Dict
import numpy as np
from easydict import EasyDict as ED
from scipy.ndimage.filters import median_filter
from scipy.signal.signaltools import resample
from scipy.io import savemat
# from scipy.signal import medfilt
# https://github.com/scipy/scipy/issues/9680
try:
from biosppy.signals.tools import filter_signal
except:
from references.biosppy.biosppy.signals.tools import filter_signal
from cfg import PreprocCfg
from .ecg_rpeaks import (
xqrs_detect, gqrs_detect, pantompkins,
hamilton_detect, ssf_detect, christov_detect, engzee_detect, gamboa_detect,
)
from .ecg_rpeaks_dl import seq_lab_net_detect
__all__ = [
"preprocess_signal",
"parallel_preprocess_signal",
"denoise_signal",
]
QRS_DETECTORS = {
"xqrs": xqrs_detect,
"gqrs": gqrs_detect,
"pantompkins": pantompkins,
"hamilton": hamilton_detect,
"ssf": ssf_detect,
"christov": christov_detect,
"engzee": engzee_detect,
"gamboa": gamboa_detect,
"seq_lab": seq_lab_net_detect,
}
DL_QRS_DETECTORS = [
"seq_lab",
]
def preprocess_signal(raw_sig:np.ndarray, fs:Real, config:Optional[ED]=None) -> Dict[str, np.ndarray]:
""" finished, checked,
Parameters:
-----------
raw_sig: ndarray,
the raw ecg signal
fs: real number,
sampling frequency of `raw_sig`
config: dict, optional,
extra process configuration,
`PreprocCfg` will be updated by this `config`
Returns:
--------
retval: dict,
with items
- 'filtered_ecg': the array of the processed ecg signal
- 'rpeaks': the array of indices of rpeaks; empty if 'rpeaks' in `config` is not set
NOTE:
-----
output (`retval`) are resampled to have sampling frequency
equal to `config.fs` (if `config` has item `fs`) or `PreprocCfg.fs`
"""
filtered_ecg = raw_sig.copy()
cfg = deepcopy(PreprocCfg)
cfg.update(config or {})
if fs != cfg.fs:
filtered_ecg = resample(filtered_ecg, int(round(len(filtered_ecg)*cfg.fs/fs)))
# remove baseline
if 'baseline' in cfg.preproc:
window1 = 2 * (cfg.baseline_window1 // 2) + 1 # window size must be odd
window2 = 2 * (cfg.baseline_window2 // 2) + 1
baseline = median_filter(filtered_ecg, size=window1, mode='nearest')
baseline = median_filter(baseline, size=window2, mode='nearest')
filtered_ecg = filtered_ecg - baseline
# filter signal
if 'bandpass' in cfg.preproc:
filtered_ecg = filter_signal(
signal=filtered_ecg,
ftype='FIR',
band='bandpass',
order=int(0.3 * fs),
sampling_rate=fs,
frequency=cfg.filter_band,
)['signal']
if cfg.rpeaks and cfg.rpeaks.lower() not in DL_QRS_DETECTORS:
# dl detectors not for parallel computing using `mp`
detector = QRS_DETECTORS[cfg.rpeaks.lower()]
rpeaks = detector(sig=filtered_ecg, fs=fs).astype(int)
else:
rpeaks = np.array([], dtype=int)
retval = ED({
"filtered_ecg": filtered_ecg,
"rpeaks": rpeaks,
})
return retval
def parallel_preprocess_signal(raw_sig:np.ndarray, fs:Real, config:Optional[ED]=None, save_dir:Optional[str]=None, save_fmt:str='npy', verbose:int=0) -> Dict[str, np.ndarray]:
""" finished, checked,
Parameters:
-----------
raw_sig: ndarray,
the raw ecg signal
fs: real number,
sampling frequency of `raw_sig`
config: dict, optional,
extra process configuration,
`PreprocCfg` will `update` this `config`
save_dir: str, optional,
directory for saving the outcome ('filtered_ecg' and 'rpeaks')
save_fmt: str, default 'npy',
format of the save files, 'npy' or 'mat'
Returns:
--------
retval: dict,
with items
- 'filtered_ecg': the array of the processed ecg signal
- 'rpeaks': the array of indices of rpeaks; empty if 'rpeaks' in `config` is not set
NOTE:
-----
output (`retval`) are resampled to have sampling frequency
equal to `config.fs` (if `config` has item `fs`) or `PreprocCfg.fs`
"""
start_time = time.time()
cfg = deepcopy(PreprocCfg)
cfg.update(config or {})
epoch_len = int(cfg.parallel_epoch_len * fs)
epoch_overlap_half = int(cfg.parallel_epoch_overlap * fs) // 2
epoch_overlap = 2 * epoch_overlap_half
epoch_forward = epoch_len - epoch_overlap
if len(raw_sig) <= 3 * epoch_len: # too short, no need for parallel computing
retval = preprocess_signal(raw_sig, fs, cfg)
if cfg.rpeaks and cfg.rpeaks.lower() in DL_QRS_DETECTORS:
rpeaks = QRS_DETECTORS[cfg.rpeaks.lower()](sig=raw_sig, fs=fs, verbose=verbose).astype(int)
retval.rpeaks = rpeaks
return retval
l_epoch = [
raw_sig[idx*epoch_forward: idx*epoch_forward + epoch_len] \
for idx in range((len(raw_sig)-epoch_overlap)//epoch_forward)
]
if cfg.parallel_keep_tail:
tail_start_idx = epoch_forward * len(l_epoch) + epoch_overlap
if len(raw_sig) - tail_start_idx < 30 * fs: # less than 30s, make configurable?
# append to the last epoch
l_epoch[-1] = np.append(l_epoch[-1], raw_sig[tail_start_idx:])
else: # long enough
tail_epoch = raw_sig[tail_start_idx-epoch_overlap:]
l_epoch.append(tail_epoch)
cpu_num = max(1, mp.cpu_count()-3)
with mp.Pool(processes=cpu_num) as pool:
result = pool.starmap(
func=preprocess_signal,
iterable=[(e, fs, cfg) for e in l_epoch],
)
if cfg.parallel_keep_tail:
tail_result = result[-1]
result = result[:-1]
filtered_ecg = result[0]['filtered_ecg'][:epoch_len-epoch_overlap_half]
rpeaks = result[0]['rpeaks'][np.where(result[0]['rpeaks']<epoch_len-epoch_overlap_half)[0]]
for idx, e in enumerate(result[1:]):
filtered_ecg = np.append(
filtered_ecg, e['filtered_ecg'][epoch_overlap_half: -epoch_overlap_half]
)
epoch_rpeaks = e['rpeaks'][np.where( (e['rpeaks'] >= epoch_overlap_half) & (e['rpeaks'] < epoch_len-epoch_overlap_half) )[0]]
rpeaks = np.append(rpeaks, (idx+1)*epoch_forward + epoch_rpeaks)
if cfg.parallel_keep_tail:
filtered_ecg = np.append(filtered_ecg, tail_result['filtered_ecg'][epoch_overlap_half:])
tail_rpeaks = tail_result['rpeaks'][np.where(tail_result['rpeaks'] >= epoch_overlap_half)[0]]
rpeaks = np.append(rpeaks, len(result)*epoch_forward + tail_rpeaks)
if verbose >= 1:
if cfg.rpeaks.lower() in DL_QRS_DETECTORS:
print(f"signal processing took {round(time.time()-start_time, 3)} seconds")
else:
print(f"signal processing and R peaks detection took {round(time.time()-start_time, 3)} seconds")
start_time = time.time()
if cfg.rpeaks and cfg.rpeaks.lower() in DL_QRS_DETECTORS:
rpeaks = QRS_DETECTORS[cfg.rpeaks.lower()](sig=raw_sig, fs=fs, verbose=verbose).astype(int)
if verbose >= 1:
print(f"R peaks detection using {cfg.rpeaks} took {round(time.time()-start_time, 3)} seconds")
if save_dir:
# NOTE: this part is not tested
os.makedirs(save_dir, exist_ok=True)
if save_fmt.lower() == 'npy':
np.save(os.path.join(save_dir, "filtered_ecg.npy"), filtered_ecg)
np.save(os.path.join(save_dir, "rpeaks.npy"), rpeaks)
elif save_fmt.lower() == 'mat':
# save into 2 files, keep in accordance
savemat(os.path.join(save_dir, "filtered_ecg.mat"), {"filtered_ecg": filtered_ecg}, format='5')
savemat(os.path.join(save_dir, "rpeaks.mat"), {"rpeaks": rpeaks}, format='5')
retval = ED({
"filtered_ecg": filtered_ecg,
"rpeaks": rpeaks,
})
return retval
"""
to check correctness of the function `parallel_preprocess_signal`,
say for record A01, one can call
>>> raw_sig = loadmat("./data/A01.mat")['ecg'].flatten()
>>> processed = parallel_preprocess_signal(raw_sig, 400)
>>> print(len(processed['filtered_ecg']) - len(raw_sig))
>>> start_t = int(3600*24.7811)
>>> len_t = 10
>>> fig, ax = plt.subplots(figsize=(20,6))
>>> ax.plot(hehe['filtered_ecg'][start_t*400:(start_t+len_t)*400])
>>> for r in [p for p in hehe['rpeaks'] if start_t*400 <= p < (start_t+len_t)*400]:
>>> ax.axvline(r-start_t*400,c='red',linestyle='dashed')
>>> plt.show()
or one can use the 'dataset.py'
"""
| en | 0.667407 | preprocess of (single lead) ecg signal: band pass --> remove baseline --> find rpeaks --> denoise (mainly deal with motion artefact) TODO: 1. motion artefact detection, and slice the signal into continuous (no motion artefact within) segments 2. to add References: ----------- [1] https://github.com/PIA-Group/BioSPPy [2] to add # from scipy.signal import medfilt # https://github.com/scipy/scipy/issues/9680 finished, checked, Parameters: ----------- raw_sig: ndarray, the raw ecg signal fs: real number, sampling frequency of `raw_sig` config: dict, optional, extra process configuration, `PreprocCfg` will be updated by this `config` Returns: -------- retval: dict, with items - 'filtered_ecg': the array of the processed ecg signal - 'rpeaks': the array of indices of rpeaks; empty if 'rpeaks' in `config` is not set NOTE: ----- output (`retval`) are resampled to have sampling frequency equal to `config.fs` (if `config` has item `fs`) or `PreprocCfg.fs` # remove baseline # window size must be odd # filter signal # dl detectors not for parallel computing using `mp` finished, checked, Parameters: ----------- raw_sig: ndarray, the raw ecg signal fs: real number, sampling frequency of `raw_sig` config: dict, optional, extra process configuration, `PreprocCfg` will `update` this `config` save_dir: str, optional, directory for saving the outcome ('filtered_ecg' and 'rpeaks') save_fmt: str, default 'npy', format of the save files, 'npy' or 'mat' Returns: -------- retval: dict, with items - 'filtered_ecg': the array of the processed ecg signal - 'rpeaks': the array of indices of rpeaks; empty if 'rpeaks' in `config` is not set NOTE: ----- output (`retval`) are resampled to have sampling frequency equal to `config.fs` (if `config` has item `fs`) or `PreprocCfg.fs` # too short, no need for parallel computing # less than 30s, make configurable? # append to the last epoch # long enough # NOTE: this part is not tested # save into 2 files, keep in accordance to check correctness of the function `parallel_preprocess_signal`, say for record A01, one can call >>> raw_sig = loadmat("./data/A01.mat")['ecg'].flatten() >>> processed = parallel_preprocess_signal(raw_sig, 400) >>> print(len(processed['filtered_ecg']) - len(raw_sig)) >>> start_t = int(3600*24.7811) >>> len_t = 10 >>> fig, ax = plt.subplots(figsize=(20,6)) >>> ax.plot(hehe['filtered_ecg'][start_t*400:(start_t+len_t)*400]) >>> for r in [p for p in hehe['rpeaks'] if start_t*400 <= p < (start_t+len_t)*400]: >>> ax.axvline(r-start_t*400,c='red',linestyle='dashed') >>> plt.show() or one can use the 'dataset.py' | 2.390558 | 2 |
ocaml/bootstrap.bzl | mobileink/obazl | 0 | 10403 |
## mv to //:WORKSPACE.bzl ocaml_configure
load("//ocaml/_bootstrap:ocaml.bzl", _ocaml_configure = "ocaml_configure")
# load("//ocaml/_bootstrap:obazl.bzl", _obazl_configure = "obazl_configure")
load("//ocaml/_rules:ocaml_repository.bzl" , _ocaml_repository = "ocaml_repository")
# load("//ocaml/_rules:opam_configuration.bzl" , _opam_configuration = "opam_configuration")
# load("//ocaml/_toolchains:ocaml_toolchains.bzl",
# _ocaml_toolchain = "ocaml_toolchain",
# _ocaml_register_toolchains = "ocaml_register_toolchains")
# obazl_configure = _obazl_configure
ocaml_configure = _ocaml_configure
ocaml_repository = _ocaml_repository
# ocaml_toolchain = _ocaml_toolchain
# ocaml_register_toolchains = _ocaml_register_toolchains
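A hedged sketch of how the symbols re-exported above might be consumed from a project's WORKSPACE; the `@obazl` repository label and the argument-free call are assumptions, not something this file defines:

    # WORKSPACE (hypothetical)
    load("@obazl//ocaml:bootstrap.bzl", "ocaml_configure")
    ocaml_configure()  # presumably configures the local OCaml/opam toolchain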
tsts.py | tedtroxell/metrician | 0 | 10404 | <filename>tsts.py
from metrician.explainations.tests import *
| <filename>tsts.py
from metrician.explainations.tests import *
| none | 1 | 0.935181 | 1 |
|
simple/facenet.py | taflahi/facenet | 5 | 10405 | <reponame>taflahi/facenet
import tensorflow as tf
from .. src.align import detect_face
from .. src import facenet
from .. simple import download_model
import sys
import os
from os.path import expanduser
import copy
import cv2
import numpy as np
from scipy import spatial
minsize = 20 # minimum size of face
threshold = [0.6, 0.7, 0.7] # three steps's threshold
factor = 0.709 # scale factor
def align_face(images, image_size=160, margin=11):
with tf.Graph().as_default():
sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
with sess.as_default():
pnet, rnet, onet = detect_face.create_mtcnn(sess, None)
tmp_image_paths = copy.copy(images)
img_list = []
for image in tmp_image_paths:
img = cv2.imread(os.path.expanduser(image))[:, :, ::-1]
img_size = np.asarray(img.shape)[0:2]
bounding_boxes, _ = detect_face.detect_face(
img, minsize, pnet, rnet, onet, threshold, factor)
if len(bounding_boxes) < 1:
            images.remove(image)  # drop this entry from the input list; no face was detected
print("can't detect face, remove ", image)
continue
det = np.squeeze(bounding_boxes[0, 0:4])
bb = np.zeros(4, dtype=np.int32)
bb[0] = np.maximum(det[0] - margin / 2, 0)
bb[1] = np.maximum(det[1] - margin / 2, 0)
bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
aligned = cv2.resize(cropped[:, :, ::-1],
(image_size, image_size))[:, :, ::-1]
prewhitened = facenet.prewhiten(aligned)
img_list.append(prewhitened)
images = np.stack(img_list)
return images
def embedding(images):
# check is model exists
home = expanduser('~')
model_path = home + '/.facenet_model/20180408-102900/20180408-102900.pb'
if not os.path.exists(model_path):
print("model not exists, downloading model")
download_model.download()
print("model downloaded to " + model_path)
with tf.Graph().as_default():
with tf.Session() as sess:
facenet.load_model(model_path)
# Get input and output tensors
images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
# Run forward pass to calculate embeddings
feed_dict = {images_placeholder: images,
phase_train_placeholder: False}
emb = sess.run(embeddings, feed_dict=feed_dict)
return emb
def compare(images, threshold=0.7):
emb = embedding(images)
sims = np.zeros((len(images), len(images)))
for i in range(len(images)):
for j in range(len(images)):
sims[i][j] = (
1 - spatial.distance.cosine(emb[i], emb[j]) > threshold)
return sims
| import tensorflow as tf
from .. src.align import detect_face
from .. src import facenet
from .. simple import download_model
import sys
import os
from os.path import expanduser
import copy
import cv2
import numpy as np
from scipy import spatial
minsize = 20 # minimum size of face
threshold = [0.6, 0.7, 0.7] # three steps's threshold
factor = 0.709 # scale factor
def align_face(images, image_size=160, margin=11):
with tf.Graph().as_default():
sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
with sess.as_default():
pnet, rnet, onet = detect_face.create_mtcnn(sess, None)
tmp_image_paths = copy.copy(images)
img_list = []
for image in tmp_image_paths:
img = cv2.imread(os.path.expanduser(image))[:, :, ::-1]
img_size = np.asarray(img.shape)[0:2]
bounding_boxes, _ = detect_face.detect_face(
img, minsize, pnet, rnet, onet, threshold, factor)
if len(bounding_boxes) < 1:
            images.remove(image)  # drop this entry from the input list; no face was detected
print("can't detect face, remove ", image)
continue
det = np.squeeze(bounding_boxes[0, 0:4])
bb = np.zeros(4, dtype=np.int32)
bb[0] = np.maximum(det[0] - margin / 2, 0)
bb[1] = np.maximum(det[1] - margin / 2, 0)
bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
aligned = cv2.resize(cropped[:, :, ::-1],
(image_size, image_size))[:, :, ::-1]
prewhitened = facenet.prewhiten(aligned)
img_list.append(prewhitened)
images = np.stack(img_list)
return images
def embedding(images):
# check is model exists
home = expanduser('~')
model_path = home + '/.facenet_model/20180408-102900/20180408-102900.pb'
if not os.path.exists(model_path):
print("model not exists, downloading model")
download_model.download()
print("model downloaded to " + model_path)
with tf.Graph().as_default():
with tf.Session() as sess:
facenet.load_model(model_path)
# Get input and output tensors
images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
# Run forward pass to calculate embeddings
feed_dict = {images_placeholder: images,
phase_train_placeholder: False}
emb = sess.run(embeddings, feed_dict=feed_dict)
return emb
def compare(images, threshold=0.7):
emb = embedding(images)
sims = np.zeros((len(images), len(images)))
for i in range(len(images)):
for j in range(len(images)):
sims[i][j] = (
1 - spatial.distance.cosine(emb[i], emb[j]) > threshold)
return sims | en | 0.801858 | # minimum size of face # three steps's threshold # scale factor # check is model exists # Get input and output tensors # Run forward pass to calculate embeddings | 2.29055 | 2 |
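A short usage sketch for the two helpers above, since compare() expects the aligned image array returned by align_face() rather than raw file paths. The import path and the image file names below are assumptions for illustration, not part of the original package.
# Hedged usage sketch; the import path and file names are assumed, not taken from the repo.
from simple.facenet import align_face, compare
paths = ["person_a.jpg", "person_b.jpg"]        # hypothetical local test images
aligned = align_face(paths, image_size=160)     # MTCNN detect -> crop -> prewhiten, shape (2, 160, 160, 3)
sims = compare(aligned, threshold=0.7)          # 1.0 where cosine similarity exceeds the threshold, else 0.0
print(sims)                                     # 2x2 matrix; diagonal entries are 1.0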
athena/athena/errors.py | aculich/openmappr | 19 | 10406 | <gh_stars>10-100
class AthenaError(Exception):
"""base class for all athena exceptions"""
pass
class AthenaMongoError(AthenaError):
"""Class for all mongo related errors"""
pass | class AthenaError(Exception):
"""base class for all athena exceptions"""
pass
class AthenaMongoError(AthenaError):
"""Class for all mongo related errors"""
pass | en | 0.713593 | base class for all athena exceptions Class for all mongo related errors | 1.830688 | 2 |
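The two-level hierarchy above exists so callers can trap any athena failure through the shared base class; a minimal sketch with a hypothetical raising function:
# Hypothetical caller; write_to_mongo() stands in for any athena routine that may fail.
def write_to_mongo():
    raise AthenaMongoError("replica set unreachable")
try:
    write_to_mongo()
except AthenaError as err:      # AthenaMongoError is caught via the common base class
    print("athena operation failed:", err)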
tf2stats/__init__.py | TheAntecedent/Quintessence | 1 | 10407 | <filename>tf2stats/__init__.py
from .aggregated_stats import *
from .game_stats import *
from .stat_definitions import * | <filename>tf2stats/__init__.py
from .aggregated_stats import *
from .game_stats import *
from .stat_definitions import * | none | 1 | 1.145168 | 1 |
|
tests/test_messages/test_inbound/test_manage_all_link_record.py | michaeldavie/pyinsteon | 15 | 10408 | """Test Manage All-Link Record."""
import unittest
from binascii import unhexlify
from pyinsteon.address import Address
from pyinsteon.constants import AckNak, ManageAllLinkRecordAction, MessageId
from pyinsteon.protocol.messages.all_link_record_flags import \
AllLinkRecordFlags
from tests import set_log_levels
from tests.utils import hex_to_inbound_message
# pylint: disable=no-member
class TestManageAllLinkRecord(unittest.TestCase):
"""Test Manage All-Link Record."""
def setUp(self):
"""Set up test."""
self.hex = "026F400405060708090a0b"
self.hex_ack = "026F400405060708090a0b06"
self.message_id = MessageId(0x6F)
self.action = ManageAllLinkRecordAction(0x40)
self.flags = AllLinkRecordFlags(0x04)
self.group = int(0x05)
self.address = Address("060708")
self.data1 = int(0x09)
self.data2 = int(0x0A)
self.data3 = int(0x0B)
self.ack = AckNak(0x06)
self.msg, self.msg_bytes = hex_to_inbound_message(self.hex_ack)
set_log_levels(
logger="info",
logger_pyinsteon="info",
logger_messages="info",
logger_topics=False,
)
def test_id(self):
"""Test ID."""
assert self.msg.message_id == self.message_id
def test_ack_nak(self):
"""Test ACK/NAK."""
assert self.msg.ack == self.ack
def test_bytes(self):
"""Test bytes."""
assert bytes(self.msg) == unhexlify(self.hex_ack)
| """Test Manage All-Link Record."""
import unittest
from binascii import unhexlify
from pyinsteon.address import Address
from pyinsteon.constants import AckNak, ManageAllLinkRecordAction, MessageId
from pyinsteon.protocol.messages.all_link_record_flags import \
AllLinkRecordFlags
from tests import set_log_levels
from tests.utils import hex_to_inbound_message
# pylint: disable=no-member
class TestManageAllLinkRecord(unittest.TestCase):
"""Test Manage All-Link Record."""
def setUp(self):
"""Set up test."""
self.hex = "026F400405060708090a0b"
self.hex_ack = "026F400405060708090a0b06"
self.message_id = MessageId(0x6F)
self.action = ManageAllLinkRecordAction(0x40)
self.flags = AllLinkRecordFlags(0x04)
self.group = int(0x05)
self.address = Address("060708")
self.data1 = int(0x09)
self.data2 = int(0x0A)
self.data3 = int(0x0B)
self.ack = AckNak(0x06)
self.msg, self.msg_bytes = hex_to_inbound_message(self.hex_ack)
set_log_levels(
logger="info",
logger_pyinsteon="info",
logger_messages="info",
logger_topics=False,
)
def test_id(self):
"""Test ID."""
assert self.msg.message_id == self.message_id
def test_ack_nak(self):
"""Test ACK/NAK."""
assert self.msg.ack == self.ack
def test_bytes(self):
"""Test bytes."""
assert bytes(self.msg) == unhexlify(self.hex_ack)
| en | 0.54433 | Test Manage All-Link Record. # pylint: disable=no-member Test Manage All-Link Record. Set up test. Test ID. Test ACK/NAK. Test bytes. | 2.439274 | 2 |
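The fixture hex string above encodes one field per byte position; the sketch below simply mirrors the assertions made in setUp(), and the leading 0x02 start-of-message byte is read off the literal rather than taken from the pyinsteon documentation.
# Hedged byte-layout sketch of the ACK frame used in the test; offsets mirror the setUp() values.
from binascii import unhexlify
raw = unhexlify("026F400405060708090a0b06")
frame = {
    "start": raw[0],            # 0x02, assumed start-of-message marker
    "message_id": raw[1],       # 0x6F -> MessageId
    "action": raw[2],           # 0x40 -> ManageAllLinkRecordAction
    "flags": raw[3],            # 0x04 -> AllLinkRecordFlags
    "group": raw[4],            # 0x05
    "address": raw[5:8].hex(),  # "060708"
    "data1": raw[8],            # 0x09
    "data2": raw[9],            # 0x0A
    "data3": raw[10],           # 0x0B
    "ack": raw[11],             # 0x06 -> AckNak ACK
}
print(frame)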
Clock/Clock_Fig3F.py | chAwater/OpenFig | 0 | 10409 | <reponame>chAwater/OpenFig
#!/usr/bin/env python
# coding: utf-8
# # Figure Info.
#
# | Title | Journal | Authors | Article Date | Code Date | Figure | Links |
# |:------|:-------:|:-------:|:------------:|:---------:|:------:|:-----:|
# |A microfluidic approach for experimentally modelling <br> the intercellular coupling system of a mammalian <br> circadian clock at single-cell level|Lab on a Chip|<NAME>|2020.03.02|2020.03.11| Fig3F | [DOI](https://doi.org/10.1039/D0LC00140F) |
#
# In[1]:
# data_file = 'SinPeaksDOWN.xls'
# new_inputs = pd.read_excel(data_file,header=None)
# new_inputs.to_csv('data.csv',index=False)
# In[2]:
import os, sys, warnings
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['svg.fonttype'] = 'none'
sns.set_context(context='poster')
bigsize = 20
midsize = 18
smallsize = 14
hugesize = 24
# In[ ]:
# Load data
new_inputs = pd.read_csv('data.csv')
new_inputs = new_inputs.values.flatten()
new_inputs = new_inputs[~np.isnan(new_inputs)]
new_inputs = pd.Series(new_inputs)
dict_time = new_inputs.astype(int).value_counts()
# Set start and end days
d_min = np.floor( ((new_inputs-12)/24).astype(np.float).min() )
d_min = max(0, d_min)
d_max = np.ceil( ((new_inputs-12)/24).astype(np.float).max() )
drug_time = 22 + np.arange(0,d_max+1)*24
# Set plot
n_plot = int( d_max - d_min + 1 )
n_rows = int( np.ceil(n_plot/4) )
ratio_dfs_dict = dict(zip(np.arange(n_plot), [pd.DataFrame()]*n_plot))
fig, axs = plt.subplots(
ncols=4,nrows=n_rows,
figsize=(18,n_rows*4),
subplot_kw={'polar':True},
gridspec_kw={'hspace':0.5},
)
axs = axs.flatten()
# Plot data for each 24h
for i_time in dict_time.keys():
if i_time<12:
continue
d_time = int( np.floor((i_time-12)/24)-d_min )
# In one day
ratio_df = ratio_dfs_dict[d_time]
ratio_df = ratio_df.append(
{
'ref_time' : ((i_time-12) % 24),
'n' : dict_time[i_time]
}, ignore_index=True)
ratio_dfs_dict[d_time] = ratio_df
# Date to r
t_time = (((i_time-12) % 24)/24)*2*np.pi
t_drug = ((1+drug_time[d_time]-12)%24)/24*2*np.pi
axs[d_time].bar(t_drug, 1, width=2/24*2*np.pi, bottom=0.0, color='bisque', edgecolor='k', alpha=0.7, zorder=10)
axs[d_time].scatter(t_time, 0.5, color='dodgerblue', s=dict_time[i_time]*30, alpha=0.7, zorder=20)
# Plot info for each 24h
for i,ax in enumerate(axs):
labels = (12+np.arange(24*(d_min+i),24*(d_min+i+1),6)).astype(int).astype(str)
labels[0] = str( int(labels[0])+24 ) + ' / ' + labels[0]
labels[2] = labels[2] + ' h'
ax.set_xticklabels( labels, fontsize=midsize )
ax.set_yticklabels([])
ax.tick_params(axis='x', pad=0)
ratio_df = ratio_dfs_dict[i]
if ratio_df.shape[0]!=0:
r_df = pd.concat(
[
ratio_df['n'],
pd.cut(
ratio_df['ref_time'],
bins =[0, 3, 10, 14, 24 ],
labels=[ 'Q1','Q2','Q3','Q4'],
include_lowest=True,
)
], axis=1
).groupby('ref_time').sum()
r = np.round( 100*(r_df.loc['Q3']/r_df.sum())['n'], 1 )
ax.text( 12/24*2*np.pi, -0.5, str(r)+'%', fontsize=smallsize, ha='center', va='center', color='tomato' )
ax.plot(
np.linspace(10, 14, 20)/24*2*np.pi,
[0.05]*20,
lw=5, color='tomato',alpha=0.7,
zorder=20,
)
ax.set_thetagrids([0,90,180,270])
ax.set_theta_zero_location('N')
ax.set_theta_direction(-1)
ax.set_rgrids([])
ax.set_rlim(0,1)
ax.set_rorigin(-1.0)
ax.annotate(
s='',
xytext=(np.pi/8,1),
xy=(np.pi*3/8,1),
size=40,
arrowprops={
'facecolor':'black',
'arrowstyle':'->',
'connectionstyle':"arc3,rad=-0.17",
},
)
ax.text(np.pi/4,1,'Time',fontsize=smallsize, rotation=-40, ha='center',va='bottom')
else:
lgs = []
for s in np.arange(5,30,5):
lg = ax.scatter(s, 0.5, color='dodgerblue', s=s*30, alpha=0.7, zorder=1, label=s)
lgs.append(lg)
lg = ax.scatter(1,1,marker='s',s=300, color='bisque', edgecolor='k', alpha=0.7, label='Drug')
lgs.append(lg)
ax.set_rlim(0,0.1)
ax.axis('off')
ax.legend(
handles=lgs,
ncol=2,
title='# of cells',
title_fontsize=midsize,
fontsize=smallsize,
frameon=False,
labelspacing=1.5,
handletextpad=0.2,
columnspacing=0.4,
)
fig.subplots_adjust(hspace=0.3)
fig.suptitle('Cells distribution under drug treatment', y=1, fontsize=hugesize)
fig.savefig('Clock_Fig3F.svg', transparent=True, bbox_inches='tight')
fig.savefig('Clock_Fig3F.png', transparent=True, bbox_inches='tight')
plt.show()
# In[ ]:
| #!/usr/bin/env python
# coding: utf-8
# # Figure Info.
#
# | Title | Journal | Authors | Article Date | Code Date | Figure | Links |
# |:------|:-------:|:-------:|:------------:|:---------:|:------:|:-----:|
# |A microfluidic approach for experimentally modelling <br> the intercellular coupling system of a mammalian <br> circadian clock at single-cell level|Lab on a Chip|<NAME>|2020.03.02|2020.03.11| Fig3F | [DOI](https://doi.org/10.1039/D0LC00140F) |
#
# In[1]:
# data_file = 'SinPeaksDOWN.xls'
# new_inputs = pd.read_excel(data_file,header=None)
# new_inputs.to_csv('data.csv',index=False)
# In[2]:
import os, sys, warnings
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['svg.fonttype'] = 'none'
sns.set_context(context='poster')
bigsize = 20
midsize = 18
smallsize = 14
hugesize = 24
# In[ ]:
# Load data
new_inputs = pd.read_csv('data.csv')
new_inputs = new_inputs.values.flatten()
new_inputs = new_inputs[~np.isnan(new_inputs)]
new_inputs = pd.Series(new_inputs)
dict_time = new_inputs.astype(int).value_counts()
# Set start and end days
d_min = np.floor( ((new_inputs-12)/24).astype(np.float).min() )
d_min = max(0, d_min)
d_max = np.ceil( ((new_inputs-12)/24).astype(np.float).max() )
drug_time = 22 + np.arange(0,d_max+1)*24
# Set plot
n_plot = int( d_max - d_min + 1 )
n_rows = int( np.ceil(n_plot/4) )
ratio_dfs_dict = dict(zip(np.arange(n_plot), [pd.DataFrame()]*n_plot))
fig, axs = plt.subplots(
ncols=4,nrows=n_rows,
figsize=(18,n_rows*4),
subplot_kw={'polar':True},
gridspec_kw={'hspace':0.5},
)
axs = axs.flatten()
# Plot data for each 24h
for i_time in dict_time.keys():
if i_time<12:
continue
d_time = int( np.floor((i_time-12)/24)-d_min )
# In one day
ratio_df = ratio_dfs_dict[d_time]
ratio_df = ratio_df.append(
{
'ref_time' : ((i_time-12) % 24),
'n' : dict_time[i_time]
}, ignore_index=True)
ratio_dfs_dict[d_time] = ratio_df
# Date to r
t_time = (((i_time-12) % 24)/24)*2*np.pi
t_drug = ((1+drug_time[d_time]-12)%24)/24*2*np.pi
axs[d_time].bar(t_drug, 1, width=2/24*2*np.pi, bottom=0.0, color='bisque', edgecolor='k', alpha=0.7, zorder=10)
axs[d_time].scatter(t_time, 0.5, color='dodgerblue', s=dict_time[i_time]*30, alpha=0.7, zorder=20)
# Plot info for each 24h
for i,ax in enumerate(axs):
labels = (12+np.arange(24*(d_min+i),24*(d_min+i+1),6)).astype(int).astype(str)
labels[0] = str( int(labels[0])+24 ) + ' / ' + labels[0]
labels[2] = labels[2] + ' h'
ax.set_xticklabels( labels, fontsize=midsize )
ax.set_yticklabels([])
ax.tick_params(axis='x', pad=0)
ratio_df = ratio_dfs_dict[i]
if ratio_df.shape[0]!=0:
r_df = pd.concat(
[
ratio_df['n'],
pd.cut(
ratio_df['ref_time'],
bins =[0, 3, 10, 14, 24 ],
labels=[ 'Q1','Q2','Q3','Q4'],
include_lowest=True,
)
], axis=1
).groupby('ref_time').sum()
r = np.round( 100*(r_df.loc['Q3']/r_df.sum())['n'], 1 )
ax.text( 12/24*2*np.pi, -0.5, str(r)+'%', fontsize=smallsize, ha='center', va='center', color='tomato' )
ax.plot(
np.linspace(10, 14, 20)/24*2*np.pi,
[0.05]*20,
lw=5, color='tomato',alpha=0.7,
zorder=20,
)
ax.set_thetagrids([0,90,180,270])
ax.set_theta_zero_location('N')
ax.set_theta_direction(-1)
ax.set_rgrids([])
ax.set_rlim(0,1)
ax.set_rorigin(-1.0)
ax.annotate(
s='',
xytext=(np.pi/8,1),
xy=(np.pi*3/8,1),
size=40,
arrowprops={
'facecolor':'black',
'arrowstyle':'->',
'connectionstyle':"arc3,rad=-0.17",
},
)
ax.text(np.pi/4,1,'Time',fontsize=smallsize, rotation=-40, ha='center',va='bottom')
else:
lgs = []
for s in np.arange(5,30,5):
lg = ax.scatter(s, 0.5, color='dodgerblue', s=s*30, alpha=0.7, zorder=1, label=s)
lgs.append(lg)
lg = ax.scatter(1,1,marker='s',s=300, color='bisque', edgecolor='k', alpha=0.7, label='Drug')
lgs.append(lg)
ax.set_rlim(0,0.1)
ax.axis('off')
ax.legend(
handles=lgs,
ncol=2,
title='# of cells',
title_fontsize=midsize,
fontsize=smallsize,
frameon=False,
labelspacing=1.5,
handletextpad=0.2,
columnspacing=0.4,
)
fig.subplots_adjust(hspace=0.3)
fig.suptitle('Cells distribution under drug treatment', y=1, fontsize=hugesize)
fig.savefig('Clock_Fig3F.svg', transparent=True, bbox_inches='tight')
fig.savefig('Clock_Fig3F.png', transparent=True, bbox_inches='tight')
plt.show()
# In[ ]: | en | 0.43014 | #!/usr/bin/env python # coding: utf-8 # # Figure Info. # # | Title | Journal | Authors | Article Date | Code Date | Figure | Links | # |:------|:-------:|:-------:|:------------:|:---------:|:------:|:-----:| # |A microfluidic approach for experimentally modelling <br> the intercellular coupling system of a mammalian <br> circadian clock at single-cell level|Lab on a Chip|<NAME>|2020.03.02|2020.03.11| Fig3F | [DOI](https://doi.org/10.1039/D0LC00140F) | # # In[1]: # data_file = 'SinPeaksDOWN.xls' # new_inputs = pd.read_excel(data_file,header=None) # new_inputs.to_csv('data.csv',index=False) # In[2]: # In[ ]: # Load data # Set start and end days # Set plot # Plot data for each 24h # In one day # Date to r # Plot info for each 24h # In[ ]: | 2.522221 | 3 |
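The plotting loop above repeatedly converts an absolute experiment hour into a polar angle on a 24 h clock face whose zero sits at hour 12; a stand-alone restatement of that mapping:
# Stand-alone restatement of the hour-to-angle mapping used in the polar plots above.
import numpy as np
def hour_to_theta(abs_hour):
    """Map an absolute experiment hour onto a 24 h clock face whose zero is at hour 12."""
    return ((abs_hour - 12) % 24) / 24 * 2 * np.pi
for h in (12, 18, 24, 30):
    print(h, round(hour_to_theta(h), 3))    # 0.0, pi/2, pi, 3*pi/2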
rameniaapp/views/report.py | awlane/ramenia | 0 | 10410 | from django.shortcuts import render, HttpResponse, HttpResponseRedirect
from django.template import loader
from django.conf import settings
from django.contrib.auth.models import User
from rameniaapp.models import ReviewReport, ProfileReport, NoodleReport, Report, Review, Profile, Noodle
from django.views.generic import ListView, FormView, CreateView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.decorators import login_required
from rameniaapp.decorators import user_is_moderator
from rameniaapp.actionhookutils import dispatch_hook
from rameniaapp.utils import UserIsModeratorMixin
from django.forms.widgets import Select
from django.contrib import messages
class ReportForm(LoginRequiredMixin, CreateView):
'''Class based view for creating reports'''
template_name = "report_form.html"
model = Report
success_url = "/app"
fields = ["reason"]
url_path = "/app"
login_url="/app/login"
def get_form(self, form_class=None):
form = super(ReportForm, self).get_form(form_class)
form.fields['reason'].widget.attrs.update({'class':'form-control'})
return form
def form_valid(self, form):
'''Ensures hidden form values are filled'''
form.instance.reporter = self.request.user
form.instance.status = 'OP'
return super().form_valid(form)
def get_context_data(self, **kwargs):
'''Adds url_path value and relevant object id to template'''
context = super().get_context_data(**kwargs)
context["id"] = self.kwargs["id"]
context["url_path"] = self.url_path
return context
class NoodleReportForm(ReportForm):
'''Class based view for reporting noodles'''
model = NoodleReport
#This is used to allow the form to create the correct object
url_path = "noodle_report"
def form_valid(self, form):
'''Ensures hidden form values are filled'''
form.instance.noodle = Noodle.objects.get(pk=self.kwargs["id"])
form.instance.type = 'ND'
return super().form_valid(form)
def get_context_data(self, **kwargs):
'''Passes item name to template'''
context = super().get_context_data(**kwargs)
context["name"] = Noodle.objects.get(pk=self.kwargs["id"]).name
return context
class ReviewReportForm(ReportForm):
'''Class based view for reporting reviews'''
model = ReviewReport
url_path = "review_report"
def form_valid(self, form):
'''Ensures hidden form values are filled'''
form.instance.review = Review.objects.get(pk=self.kwargs["id"])
form.instance.type = 'RV'
return super().form_valid(form)
def get_context_data(self, **kwargs):
'''Passes item name to template'''
context = super().get_context_data(**kwargs)
context["name"] = Review.objects.get(pk=self.kwargs["id"]).title
return context
class ProfileReportForm(ReportForm):
'''Class based view for reporting profile'''
model = ProfileReport
url_path = "profile_report"
def form_valid(self, form):
'''Ensures hidden form values are filled'''
form.instance.profile = Profile.objects.get(pk=self.kwargs["id"])
form.instance.type = 'PF'
return super().form_valid(form)
def get_context_data(self, **kwargs):
'''Passes item name to template'''
context = super().get_context_data(**kwargs)
context["name"] = Profile.objects.get(pk=self.kwargs["id"]).name
return context
class ReportList(LoginRequiredMixin, UserIsModeratorMixin, ListView):
'''Class based view for viewing reports'''
# These values are overriden for the subclasses so we can create
# multiple types of noodles without rewriting code
model = Report
item_type = ""
context_object_name = "reports"
template_name = "report_view.html"
login_url="/app/login"
def get_queryset(self):
'''Get all reports for specific objects'''
if "item_id" in self.kwargs:
item_tuple = self.get_item(self.kwargs["item_id"])
self.kwargs[item_tuple[0]] = item_tuple[1]
# This prevents the next line from breaking
del self.kwargs["item_id"]
# Using get_item, this lets us filter for any kind of object without
# writing extra code
return self.model.objects.filter(**self.kwargs)
def get_item(self, id):
'''Returns a tuple containing the key name and item'''
return (None, None)
def get_context_data(self, **kwargs):
'''Knowing the item type lets us not break things'''
context = super().get_context_data(**kwargs)
context['item_type'] = self.item_type
return context
class NoodleReportList(ReportList):
'''List of noodle reports'''
model = NoodleReport
item_type = "Noodles"
def get_item(self, id):
'''Returns a tuple containing the key name and item'''
noodle = Noodle.objects.get(id=id)
return ("noodle", noodle)
class ReviewReportList(ReportList):
'''List of review reports'''
model = ReviewReport
item_type = "Reviews"
def get_item(self, id):
'''Returns a tuple containing the key name and item'''
review = Review.objects.get(id=id)
return ("review", review)
class ProfileReportList(ReportList):
'''List of profile reports'''
model = ProfileReport
item_type = "Profiles"
def get_item(self, id):
'''Returns a tuple containing the key name and item'''
profile = Profile.objects.get(id=id)
return ("profile", profile)
@login_required(login_url="/app/login")
@user_is_moderator
def ban_user(request, report_type, user_id):
'''Ban a user by their id; expects report_type arg for redirect reasons'''
if request.method == "POST":
user = User.objects.get(pk=user_id).delete()
path = None
if report_type == "ND":
path = "reports/noodle"
elif report_type == "RV":
path = "reports/review"
elif report_type == "PF":
path = "reports/profile"
messages.add_message(request, messages.WARNING, "User banned")
return HttpResponseRedirect("/app/mod/{}".format(path))
else:
return HttpResponseRedirect("/app/mod")
@login_required(login_url="/app/login")
@user_is_moderator
def delete_content(request, report_id):
'''This method deletes offending items that have been reported, or just their content'''
if request.method == "POST":
report = Report.objects.get(pk=report_id)
reporter = report.reporter
creator = None
path = get_return_path(report)
# Deleting object is dependent on type
if report.type == "RV":
report = ReviewReport.objects.get(pk=report_id)
creator = report.review.reviewer
report.review.delete()
elif report.type == "ND":
report = NoodleReport.objects.get(pk=report_id)
creator = report.noodle.editor
report.noodle.delete()
elif report.type == "PF":
# Deleting a profile will break fundamental assumptions, so we instead
# remove all content from it.
report = ProfileReport.objects.get(pk=report_id)
report.profile.name = "AnonymousUser"
report.profile.profile_pic = Profile._meta.get_field('profile_pic').default
report.profile.metadata["Description"] = ""
report.profile.save()
creator = report.profile.user
report.delete()
# If we delete the content, it was reasonable to report it
dispatch_hook(reporter, "good-report")
if creator:
# If the noodle's creator hasn't been banned, penalize them
dispatch_hook(creator, "bad-content")
messages.add_message(request, messages.WARNING, "Content deleted")
return HttpResponseRedirect("/app/mod/reports/{}".format(path))
else:
return HttpResponseRedirect("/app/mod")
@login_required(login_url="/app/login")
@user_is_moderator
def update_report_status(request, report_id, status):
'''Change report status to "open", "resolved", or "spam"'''
if request.method == "POST":
# Validate status is the correct value
if status in dict(Report.STATUS_CHOICES):
report = Report.objects.get(pk=report_id)
report.status = status
report.save()
creator = None
path = get_return_path(report)
# Get the creator of the relevant object/report
if report.type == "RV":
report = ReviewReport.objects.get(pk=report_id)
creator = report.review.reviewer
elif report.type == "ND":
report = NoodleReport.objects.get(pk=report_id)
creator = report.noodle.editor
elif report.type == "PF":
report = ProfileReport.objects.get(pk=report_id)
creator = report.profile.user
# Reward people for good reports
if status == "ED":
if report.reporter:
dispatch_hook(report.reporter, "good-report")
if creator:
dispatch_hook(creator, "bad-content")
messages.add_message(request, messages.SUCCESS, "Report marked as resolved")
# Penalize people for bad reports
if status == "SP":
if report.reporter:
dispatch_hook(report.reporter, "bad-report")
messages.add_message(request, messages.WARNING, "Report marked as spam")
return HttpResponseRedirect("/app/mod/reports/{}".format(path))
else:
return HttpResponseRedirect("/app/mod")
@login_required(login_url="/app/login")
@user_is_moderator
def ignore_report(request, report_id):
'''Ignore (delete) a report'''
if request.method == "POST":
report = Report.objects.get(pk=report_id)
path = get_return_path(report)
if report.reporter:
# We assume a bad report is worth deleting if its creator
# wasn't banned
dispatch_hook(report.reporter, "bad-report")
report.delete()
messages.add_message(request, messages.WARNING, "Report ignored")
return HttpResponseRedirect("/app/mod/reports/{}".format(path))
else:
return HttpResponseRedirect("/app/mod")
def get_return_path(report):
'''Util method to return a correct redirect path'''
if report.type == "RV":
return "review"
elif report.type == "ND":
return "noodle"
elif report.type == "PF":
return "profile" | from django.shortcuts import render, HttpResponse, HttpResponseRedirect
from django.template import loader
from django.conf import settings
from django.contrib.auth.models import User
from rameniaapp.models import ReviewReport, ProfileReport, NoodleReport, Report, Review, Profile, Noodle
from django.views.generic import ListView, FormView, CreateView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.decorators import login_required
from rameniaapp.decorators import user_is_moderator
from rameniaapp.actionhookutils import dispatch_hook
from rameniaapp.utils import UserIsModeratorMixin
from django.forms.widgets import Select
from django.contrib import messages
class ReportForm(LoginRequiredMixin, CreateView):
'''Class based view for creating reports'''
template_name = "report_form.html"
model = Report
success_url = "/app"
fields = ["reason"]
url_path = "/app"
login_url="/app/login"
def get_form(self, form_class=None):
form = super(ReportForm, self).get_form(form_class)
form.fields['reason'].widget.attrs.update({'class':'form-control'})
return form
def form_valid(self, form):
'''Ensures hidden form values are filled'''
form.instance.reporter = self.request.user
form.instance.status = 'OP'
return super().form_valid(form)
def get_context_data(self, **kwargs):
'''Adds url_path value and relevant object id to template'''
context = super().get_context_data(**kwargs)
context["id"] = self.kwargs["id"]
context["url_path"] = self.url_path
return context
class NoodleReportForm(ReportForm):
'''Class based view for reporting noodles'''
model = NoodleReport
#This is used to allow the form to create the correct object
url_path = "noodle_report"
def form_valid(self, form):
'''Ensures hidden form values are filled'''
form.instance.noodle = Noodle.objects.get(pk=self.kwargs["id"])
form.instance.type = 'ND'
return super().form_valid(form)
def get_context_data(self, **kwargs):
'''Passes item name to template'''
context = super().get_context_data(**kwargs)
context["name"] = Noodle.objects.get(pk=self.kwargs["id"]).name
return context
class ReviewReportForm(ReportForm):
'''Class based view for reporting reviews'''
model = ReviewReport
url_path = "review_report"
def form_valid(self, form):
'''Ensures hidden form values are filled'''
form.instance.review = Review.objects.get(pk=self.kwargs["id"])
form.instance.type = 'RV'
return super().form_valid(form)
def get_context_data(self, **kwargs):
'''Passes item name to template'''
context = super().get_context_data(**kwargs)
context["name"] = Review.objects.get(pk=self.kwargs["id"]).title
return context
class ProfileReportForm(ReportForm):
'''Class based view for reporting profile'''
model = ProfileReport
url_path = "profile_report"
def form_valid(self, form):
'''Ensures hidden form values are filled'''
form.instance.profile = Profile.objects.get(pk=self.kwargs["id"])
form.instance.type = 'PF'
return super().form_valid(form)
def get_context_data(self, **kwargs):
'''Passes item name to template'''
context = super().get_context_data(**kwargs)
context["name"] = Profile.objects.get(pk=self.kwargs["id"]).name
return context
class ReportList(LoginRequiredMixin, UserIsModeratorMixin, ListView):
'''Class based view for viewing reports'''
# These values are overriden for the subclasses so we can create
# multiple types of noodles without rewriting code
model = Report
item_type = ""
context_object_name = "reports"
template_name = "report_view.html"
login_url="/app/login"
def get_queryset(self):
'''Get all reports for specific objects'''
if "item_id" in self.kwargs:
item_tuple = self.get_item(self.kwargs["item_id"])
self.kwargs[item_tuple[0]] = item_tuple[1]
# This prevents the next line from breaking
del self.kwargs["item_id"]
# Using get_item, this lets us filter for any kind of object without
# writing extra code
return self.model.objects.filter(**self.kwargs)
def get_item(self, id):
'''Returns a tuple containing the key name and item'''
return (None, None)
def get_context_data(self, **kwargs):
'''Knowing the item type lets us not break things'''
context = super().get_context_data(**kwargs)
context['item_type'] = self.item_type
return context
class NoodleReportList(ReportList):
'''List of noodle reports'''
model = NoodleReport
item_type = "Noodles"
def get_item(self, id):
'''Returns a tuple containing the key name and item'''
noodle = Noodle.objects.get(id=id)
return ("noodle", noodle)
class ReviewReportList(ReportList):
'''List of review reports'''
model = ReviewReport
item_type = "Reviews"
def get_item(self, id):
'''Returns a tuple containing the key name and item'''
review = Review.objects.get(id=id)
return ("review", review)
class ProfileReportList(ReportList):
'''List of profile reports'''
model = ProfileReport
item_type = "Profiles"
def get_item(self, id):
'''Returns a tuple containing the key name and item'''
profile = Profile.objects.get(id=id)
return ("profile", profile)
@login_required(login_url="/app/login")
@user_is_moderator
def ban_user(request, report_type, user_id):
'''Ban a user by their id; expects report_type arg for redirect reasons'''
if request.method == "POST":
user = User.objects.get(pk=user_id).delete()
path = None
if report_type == "ND":
path = "reports/noodle"
elif report_type == "RV":
path = "reports/review"
elif report_type == "PF":
path = "reports/profile"
messages.add_message(request, messages.WARNING, "User banned")
return HttpResponseRedirect("/app/mod/{}".format(path))
else:
return HttpResponseRedirect("/app/mod")
@login_required(login_url="/app/login")
@user_is_moderator
def delete_content(request, report_id):
'''This method deletes offending items that have been reported, or just their content'''
if request.method == "POST":
report = Report.objects.get(pk=report_id)
reporter = report.reporter
creator = None
path = get_return_path(report)
# Deleting object is dependent on type
if report.type == "RV":
report = ReviewReport.objects.get(pk=report_id)
creator = report.review.reviewer
report.review.delete()
elif report.type == "ND":
report = NoodleReport.objects.get(pk=report_id)
creator = report.noodle.editor
report.noodle.delete()
elif report.type == "PF":
# Deleting a profile will break fundamental assumptions, so we instead
# remove all content from it.
report = ProfileReport.objects.get(pk=report_id)
report.profile.name = "AnonymousUser"
report.profile.profile_pic = Profile._meta.get_field('profile_pic').default
report.profile.metadata["Description"] = ""
report.profile.save()
creator = report.profile.user
report.delete()
# If we delete the content, it was reasonable to report it
dispatch_hook(reporter, "good-report")
if creator:
# If the noodle's creator hasn't been banned, penalize them
dispatch_hook(creator, "bad-content")
messages.add_message(request, messages.WARNING, "Content deleted")
return HttpResponseRedirect("/app/mod/reports/{}".format(path))
else:
return HttpResponseRedirect("/app/mod")
@login_required(login_url="/app/login")
@user_is_moderator
def update_report_status(request, report_id, status):
'''Change report status to "open", "resolved", or "spam"'''
if request.method == "POST":
# Validate status is the correct value
if status in dict(Report.STATUS_CHOICES):
report = Report.objects.get(pk=report_id)
report.status = status
report.save()
creator = None
path = get_return_path(report)
# Get the creator of the relevant object/report
if report.type == "RV":
report = ReviewReport.objects.get(pk=report_id)
creator = report.review.reviewer
elif report.type == "ND":
report = NoodleReport.objects.get(pk=report_id)
creator = report.noodle.editor
elif report.type == "PF":
report = ProfileReport.objects.get(pk=report_id)
creator = report.profile.user
# Reward people for good reports
if status == "ED":
if report.reporter:
dispatch_hook(report.reporter, "good-report")
if creator:
dispatch_hook(creator, "bad-content")
messages.add_message(request, messages.SUCCESS, "Report marked as resolved")
# Penalize people for bad reports
if status == "SP":
if report.reporter:
dispatch_hook(report.reporter, "bad-report")
messages.add_message(request, messages.WARNING, "Report marked as spam")
return HttpResponseRedirect("/app/mod/reports/{}".format(path))
else:
return HttpResponseRedirect("/app/mod")
@login_required(login_url="/app/login")
@user_is_moderator
def ignore_report(request, report_id):
'''Ignore (delete) a report'''
if request.method == "POST":
report = Report.objects.get(pk=report_id)
path = get_return_path(report)
if report.reporter:
# We assume a bad report is worth deleting if its creator
# wasn't banned
dispatch_hook(report.reporter, "bad-report")
report.delete()
messages.add_message(request, messages.WARNING, "Report ignored")
return HttpResponseRedirect("/app/mod/reports/{}".format(path))
else:
return HttpResponseRedirect("/app/mod")
def get_return_path(report):
'''Util method to return a correct redirect path'''
if report.type == "RV":
return "review"
elif report.type == "ND":
return "noodle"
elif report.type == "PF":
return "profile" | en | 0.885907 | Class based view for creating reports Ensures hidden form values are filled Adds url_path value and relevant object id to template Class based view for reporting noodles #This is used to allow the form to create the correct object Ensures hidden form values are filled Passes item name to template Class based view for reporting reviews Ensures hidden form values are filled Passes item name to template Class based view for reporting profile Ensures hidden form values are filled Passes item name to template Class based view for viewing reports # These values are overriden for the subclasses so we can create # multiple types of noodles without rewriting code Get all reports for specific objects # This prevents the next line from breaking # Using get_item, this lets us filter for any kind of object without # writing extra code Returns a tuple containing the key name and item Knowing the item type lets us not break things List of noodle reports Returns a tuple containing the key name and item List of review reports Returns a tuple containing the key name and item List of profile reports Returns a tuple containing the key name and item Ban a user by their id; expects report_type arg for redirect reasons This method deletes offending items that have been reported, or just their content # Deleting object is dependent on type # Deleting a profile will break fundamental assumptions, so we instead # remove all content from it. # If we delete the content, it was reasonable to report it # If the noodle's creator hasn't been banned, penalize them Change report status to "open", "resolved", or "spam" # Validate status is the correct value # Get the creator of the relevant object/report # Reward people for good reports # Penalize people for bad reports Ignore (delete) a report # We assume a bad report is worth deleting if its creator # wasn't banned Util method to return a correct redirect path | 1.974638 | 2 |
pyparser.py | ddurvaux/PyUnpacker | 0 | 10411 | #!/usr/bin/python
#
# This tool is an attempt to automate some taks related
# to malware unpacking.
#
# Most (if not all) of the tricks used in this tool
# directly comes from an excellent course given
# by <NAME> (@nicolasbrulez)
#
# Tool developped by David DURVAUX for Autopsit
# (commercial brand of N-Labs sprl)
#
# TODO
# - everything
# - VirusTotal Support
# - dynamic analysis (GDB? Valgring?)
# - static code analysis with Radare2
# - add argument for PEID
# - save status / restore (config/analysis)
# - extract fucnction without offset for comparison of samples
# - ..
#
#
__author__ = '<NAME>'
__contact__ = '<EMAIL>'
__version__ = '0.01'
# Imports required by this tool
import os
import sys
import json
import pefile
import peutils
import argparse
from distorm3 import Decode, Decode16Bits, Decode32Bits, Decode64Bits, Decompose, DecomposeGenerator, DF_STOP_ON_FLOW_CONTROL
# Imports part of this tool
import static.vivframework
# --------------------------------------------------------------------------- #
# REPRESENTATION OF THE CONFIGURATION
# --------------------------------------------------------------------------- #
class Configuration:
force = False # force to redo all the analysis
modstatic = None # static analysis module
moddynamic = None # dynamic analysis module
# DB downloaded on
# https://raw.githubusercontent.com/viper-framework/viper/master/data/peid/UserDB.TXT (UPX not detected)
# https://raw.githubusercontent.com/ynadji/peid/master/userdb.txt (problems)
# http://blog.didierstevens.com/programs/yara-rules/
signatures = peutils.SignatureDatabase('./peid/peid-userdb-rules-with-pe-module.yara')
def __init__(self):
return
def save(self, filename="./config.json"):
config = {
"force": self.force,
"modstatic": self.modstatic,
"moddynamic": self.moddynamic
}
try:
# write configuration to file
fd = open(filename, "w")
json.dump(config, fd)
fd.close()
print("Configuration saved to %s" % filename)
except Exception as e:
print("Impossible to save configuration to %s" % filename)
print(e)
return
def load(self, filename="./config.json"):
config = {}
try:
# read configuration from file
fd = open(filename, "r")
config = json.load(fd)
fd.close()
            # update internal state with every stored setting
            for key in config:
                self.__dict__[key] = config[key]
except Exception as e:
print("Impossible to load configuration from %s" % filename)
print(e)
return
# --------------------------------------------------------------------------- #
# REPRESENTATION OF THE INFO RETRIEVED
# --------------------------------------------------------------------------- #
class BinaryInformations:
"""
This class will represent and hold all the information
retrieved from the binary
"""
vtinfo = {}
peheader = {}
bininfo = {}
settings = {}
packed_score = 0 # current packed score
packed_test = 0 # number of test done
breakpoints = [] # breakoint to set for unpacking
anti_debug = False
def __init__(self):
return
def log(self):
#TODO IMPLEMENT
return
def save(self, filename=sys.stdout):
print ("NOT YET IMPLEMENTED!")
return
# --------------------------------------------------------------------------- #
# STATIC ANALYSIS OF BINARY
# --------------------------------------------------------------------------- #
class StaticAnalysis:
"""
Tools to analyze statically binaries
@TODO: define access to page_size, margin, entropy_threshold and packed_score
"""
# class variable
configuration = None
binary = None
bininfo = None
page_size = 0
margin= 0
entropy_threshold = 0
packed_score = 0
SFLAGS = {
"CODE" : 0x00000020,
"DATA" : 0x00000040,
"EXEC" : 0x20000000,
"READ" : 0x40000000,
"WRIT" : 0x80000000
# other: check https://msdn.microsoft.com/en-us/library/ms809762.aspx
}
def __init__(self, binary, configuration, page_size=0x1000, margin=0.1, entropy_threshold = 7.0, packed_score=0):
"""
binary the path to the binary to analyze
"""
# set parameters
self.binary = binary
self.page_size = page_size
self.margin = margin
self.entropy_threshold = entropy_threshold
self.packed_score = packed_score
# instanciate internal objects
self.pe = pefile.PE(binary)
self.bininfo = BinaryInformations()
# keep track of the current configuration
self.configuration = configuration
# initialize static analysis module (TODO - add support for others)
self.configuration.modstatic = static.vivframework.Vivisect(self.binary, self.bininfo, self.configuration.force)
# update BinaryInformation with current settings:
self.bininfo.settings["peanalysis"] = {
"binary" : self.binary,
"page_size" : self.page_size,
"margin" : self.margin,
"entropy_threshold" : self.entropy_threshold,
"packed_score" : self.packed_score
}
# CHECK BINARY SECTIONS
def analyzeSections(self):
"""
TODO: mutliple output support, number of test
Need to Add:
- check section names
- check where entry point is located (in the last section)
- first section should be writeable
- last section should be executable
- ...
"""
# check number of sections
if(len(self.pe.sections)) != 3:
print "ABNOMALIE in NUMBER OF SECTIONS (%d)!!" % len(self.pe.sections)
self.bininfo.packed_score += 1
self.bininfo.packed_test += 1
# check section + boundary and see if it matches
for section in self.pe.sections:
[name, vaddr, vsize, rsize, flags] = [section.Name, section.VirtualAddress, section.Misc_VirtualSize, section.SizeOfRawData, section.Characteristics]
# check flags
            if (flags & (self.SFLAGS["EXEC"] | self.SFLAGS["WRIT"])) == (self.SFLAGS["EXEC"] | self.SFLAGS["WRIT"]): # section is both writeable and executable
print "ABNOMALIE SECTION SHOULD NOT BE WRITEABLE AND EXECUTABLE (W^X violation)!!"
self.bininfo.packed_score += 1
# check sections sizes (incl. page alignment)
# the rsize need to be written in a multiple of memory page size (min 1.)
# a margin is added (could be customized)
if (rsize / self.page_size + 1) * self.page_size * (1 + self.margin) < vsize:
print "ABNOMALIES with VIRTUAL SIZE ALLOCATION for SECTION: %s" % name
self.bininfo.packed_score += 1
# check entropy
if(section.get_entropy() >= self.entropy_threshold):
print "ABNORMAL ENTROPY (%s)) for SECTION: %s" % (section.get_entropy(), name)
self.bininfo.packed_score += 1
# update bininfo status
self.bininfo.packed_test += 3 # 3 tests are done for each section
print ("TOTAL PACKED SCORE: %s / %s" % (self.bininfo.packed_score, self.bininfo.packed_test))
return self.bininfo
def callPEiD(self):
"""
Use set of YARA rules to search for known packers
TODO - add a check on signature presence or download or end
- postpone initialization of signatures DB here!!
"""
matches = self.configuration.signatures.match(self.pe, ep_only = True)
if(matches is not None):
if(len(matches) > 0):
print "PACKER FOUND: %s" % matches[0]
return self.bininfo
def graphSearch(self):
"""
Do a graph search in the code for leaf nodes
"""
self.configuration.modstatic.graphSearch()
def isAntiDebug(self):
if self.configuration.modstatic.isAntiDebug():
print "WARNING: ANTI-DEBUGGING TRICKS FOUND!"
def searchVirtualAlloc(self):
self.configuration.modstatic.searchVirtualAlloc()
def getPerFunctionHash(self):
self.configuration.modstatic.getPerFunctionHash()
def decompile(self):
"""
! need to take in account offset in memory !
-- CODE TO REMOVE -- DEPRECATED --
"""
fd = open(self.binary, "rb")
l = DecomposeGenerator(0x100, fd.read(), Decode32Bits, DF_STOP_ON_FLOW_CONTROL)
while(l is not None):
# -- BEGIN TEST CODE --
for i in l:
#print "0x%08x (%02x) %-20s %s" % (i[0], i[1], i[3], i[2])
print "0x%08x %s" % (i.address, i)
# -- END TEST CODE --
l = DecomposeGenerator(0x100, fd.read(), Decode32Bits, DF_STOP_ON_FLOW_CONTROL)
fd.close()
return
# --------------------------------------------------------------------------- #
# MAIN SECTION OF CODE
# --------------------------------------------------------------------------- #
def start_analysis(binary, configuration):
sa = StaticAnalysis(binary, configuration)
sa.analyzeSections()
sa.callPEiD()
sa.graphSearch()
sa.isAntiDebug()
sa.searchVirtualAlloc()
sa.getPerFunctionHash() #TEST
#sa.decompile() # TEST
return
def main():
# Argument definition
parser = argparse.ArgumentParser(description='Analyse binaries and try to help with deobfuscation')
parser.add_argument('-b', '--binary', help='Binary to analyze')
parser.add_argument('-f', '--force', help='Force a fresh analysis, no restoration of previous work', action="store_true")
parser.add_argument('-y', '--yara', help='Path to YARA DB to use to scan binary')
parser.add_argument('-viv', '--vivisect', help='Path to vivisect installation')
# create a configuration holder
configuration = Configuration()
# Start the fun part :)
args = parser.parse_args()
# if force flag is defined, change behaviour
if args.force:
configuration.force = True
# set YARA DB signature
if args.yara:
if os.path.isfile(args.yara):
            configuration.signatures = peutils.SignatureDatabase(args.yara)  # load the user-supplied signature DB
else:
print "ERROR: %s not found!" % args.yara
exit()
# TEST - save configuration for re-use
#configuration.save()
configuration.load()
# set Vivisect path and Initialize
# currently only vivisect is supported
# this code need to be changed if other libraries get supported later
if args.vivisect:
if os.path.isdir(args.vivisect):
sys.path.append(args.vivisect)
else:
print "ERROR: %s not found!" % args.vivisect
exit()
# Check if an output directory is set
binary = None
if args.binary:
if os.path.isfile(args.binary):
binary = args.binary
start_analysis(binary, configuration)
else:
print "You need to specify a file to analyze"
exit()
if __name__ == "__main__":
main()
# --------------------------------------------------------------------------- #
# That's all folk ;)
# --------------------------------------------------------------------------- # | #!/usr/bin/python
#
# This tool is an attempt to automate some taks related
# to malware unpacking.
#
# Most (if not all) of the tricks used in this tool
# directly comes from an excellent course given
# by <NAME> (@nicolasbrulez)
#
# Tool developped by David DURVAUX for Autopsit
# (commercial brand of N-Labs sprl)
#
# TODO
# - everything
# - VirusTotal Support
# - dynamic analysis (GDB? Valgring?)
# - static code analysis with Radare2
# - add argument for PEID
# - save status / restore (config/analysis)
# - extract fucnction without offset for comparison of samples
# - ..
#
#
__author__ = '<NAME>'
__contact__ = '<EMAIL>'
__version__ = '0.01'
# Imports required by this tool
import os
import sys
import json
import pefile
import peutils
import argparse
from distorm3 import Decode, Decode16Bits, Decode32Bits, Decode64Bits, Decompose, DecomposeGenerator, DF_STOP_ON_FLOW_CONTROL
# Imports part of this tool
import static.vivframework
# --------------------------------------------------------------------------- #
# REPRESENTATION OF THE CONFIGURATION
# --------------------------------------------------------------------------- #
class Configuration:
force = False # force to redo all the analysis
modstatic = None # static analysis module
moddynamic = None # dynamic analysis module
# DB downloaded on
# https://raw.githubusercontent.com/viper-framework/viper/master/data/peid/UserDB.TXT (UPX not detected)
# https://raw.githubusercontent.com/ynadji/peid/master/userdb.txt (problems)
# http://blog.didierstevens.com/programs/yara-rules/
signatures = peutils.SignatureDatabase('./peid/peid-userdb-rules-with-pe-module.yara')
def __init__(self):
return
def save(self, filename="./config.json"):
config = {
"force": self.force,
"modstatic": self.modstatic,
"moddynamic": self.moddynamic
}
try:
# write configuration to file
fd = open(filename, "w")
json.dump(config, fd)
fd.close()
print("Configuration saved to %s" % filename)
except Exception as e:
print("Impossible to save configuration to %s" % filename)
print(e)
return
def load(self, filename="./config.json"):
config = {}
try:
# read configuration from file
fd = open(filename, "r")
config = json.load(fd)
fd.close()
            # update internal state with every stored setting
            for key in config:
                self.__dict__[key] = config[key]
except Exception as e:
print("Impossible to load configuration from %s" % filename)
print(e)
return
# --------------------------------------------------------------------------- #
# REPRESENTATION OF THE INFO RETRIEVED
# --------------------------------------------------------------------------- #
class BinaryInformations:
"""
This class will represent and hold all the information
retrieved from the binary
"""
vtinfo = {}
peheader = {}
bininfo = {}
settings = {}
packed_score = 0 # current packed score
packed_test = 0 # number of test done
breakpoints = [] # breakoint to set for unpacking
anti_debug = False
def __init__(self):
return
def log(self):
#TODO IMPLEMENT
return
def save(self, filename=sys.stdout):
print ("NOT YET IMPLEMENTED!")
return
# --------------------------------------------------------------------------- #
# STATIC ANALYSIS OF BINARY
# --------------------------------------------------------------------------- #
class StaticAnalysis:
"""
Tools to analyze statically binaries
@TODO: define access to page_size, margin, entropy_threshold and packed_score
"""
# class variable
configuration = None
binary = None
bininfo = None
page_size = 0
margin= 0
entropy_threshold = 0
packed_score = 0
SFLAGS = {
"CODE" : 0x00000020,
"DATA" : 0x00000040,
"EXEC" : 0x20000000,
"READ" : 0x40000000,
"WRIT" : 0x80000000
# other: check https://msdn.microsoft.com/en-us/library/ms809762.aspx
}
def __init__(self, binary, configuration, page_size=0x1000, margin=0.1, entropy_threshold = 7.0, packed_score=0):
"""
binary the path to the binary to analyze
"""
# set parameters
self.binary = binary
self.page_size = page_size
self.margin = margin
self.entropy_threshold = entropy_threshold
self.packed_score = packed_score
# instanciate internal objects
self.pe = pefile.PE(binary)
self.bininfo = BinaryInformations()
# keep track of the current configuration
self.configuration = configuration
# initialize static analysis module (TODO - add support for others)
self.configuration.modstatic = static.vivframework.Vivisect(self.binary, self.bininfo, self.configuration.force)
# update BinaryInformation with current settings:
self.bininfo.settings["peanalysis"] = {
"binary" : self.binary,
"page_size" : self.page_size,
"margin" : self.margin,
"entropy_threshold" : self.entropy_threshold,
"packed_score" : self.packed_score
}
# CHECK BINARY SECTIONS
def analyzeSections(self):
"""
TODO: mutliple output support, number of test
Need to Add:
- check section names
- check where entry point is located (in the last section)
- first section should be writeable
- last section should be executable
- ...
"""
# check number of sections
if(len(self.pe.sections)) != 3:
print "ABNOMALIE in NUMBER OF SECTIONS (%d)!!" % len(self.pe.sections)
self.bininfo.packed_score += 1
self.bininfo.packed_test += 1
# check section + boundary and see if it matches
for section in self.pe.sections:
[name, vaddr, vsize, rsize, flags] = [section.Name, section.VirtualAddress, section.Misc_VirtualSize, section.SizeOfRawData, section.Characteristics]
# check flags
            if (flags & (self.SFLAGS["EXEC"] | self.SFLAGS["WRIT"])) == (self.SFLAGS["EXEC"] | self.SFLAGS["WRIT"]): # section is both writeable and executable
print "ABNOMALIE SECTION SHOULD NOT BE WRITEABLE AND EXECUTABLE (W^X violation)!!"
self.bininfo.packed_score += 1
# check sections sizes (incl. page alignment)
# the rsize need to be written in a multiple of memory page size (min 1.)
# a margin is added (could be customized)
if (rsize / self.page_size + 1) * self.page_size * (1 + self.margin) < vsize:
print "ABNOMALIES with VIRTUAL SIZE ALLOCATION for SECTION: %s" % name
self.bininfo.packed_score += 1
# check entropy
if(section.get_entropy() >= self.entropy_threshold):
print "ABNORMAL ENTROPY (%s)) for SECTION: %s" % (section.get_entropy(), name)
self.bininfo.packed_score += 1
# update bininfo status
self.bininfo.packed_test += 3 # 3 tests are done for each section
print ("TOTAL PACKED SCORE: %s / %s" % (self.bininfo.packed_score, self.bininfo.packed_test))
return self.bininfo
def callPEiD(self):
"""
Use set of YARA rules to search for known packers
TODO - add a check on signature presence or download or end
- postpone initialization of signatures DB here!!
"""
matches = self.configuration.signatures.match(self.pe, ep_only = True)
if(matches is not None):
if(len(matches) > 0):
print "PACKER FOUND: %s" % matches[0]
return self.bininfo
def graphSearch(self):
"""
Do a graph search in the code for leaf nodes
"""
self.configuration.modstatic.graphSearch()
def isAntiDebug(self):
if self.configuration.modstatic.isAntiDebug():
print "WARNING: ANTI-DEBUGGING TRICKS FOUND!"
def searchVirtualAlloc(self):
self.configuration.modstatic.searchVirtualAlloc()
def getPerFunctionHash(self):
self.configuration.modstatic.getPerFunctionHash()
def decompile(self):
"""
! need to take in account offset in memory !
-- CODE TO REMOVE -- DEPRECATED --
"""
fd = open(self.binary, "rb")
l = DecomposeGenerator(0x100, fd.read(), Decode32Bits, DF_STOP_ON_FLOW_CONTROL)
while(l is not None):
# -- BEGIN TEST CODE --
for i in l:
#print "0x%08x (%02x) %-20s %s" % (i[0], i[1], i[3], i[2])
print "0x%08x %s" % (i.address, i)
# -- END TEST CODE --
l = DecomposeGenerator(0x100, fd.read(), Decode32Bits, DF_STOP_ON_FLOW_CONTROL)
fd.close()
return
# --------------------------------------------------------------------------- #
# MAIN SECTION OF CODE
# --------------------------------------------------------------------------- #
def start_analysis(binary, configuration):
sa = StaticAnalysis(binary, configuration)
sa.analyzeSections()
sa.callPEiD()
sa.graphSearch()
sa.isAntiDebug()
sa.searchVirtualAlloc()
sa.getPerFunctionHash() #TEST
#sa.decompile() # TEST
return
def main():
# Argument definition
parser = argparse.ArgumentParser(description='Analyse binaries and try to help with deobfuscation')
parser.add_argument('-b', '--binary', help='Binary to analyze')
parser.add_argument('-f', '--force', help='Force a fresh analysis, no restoration of previous work', action="store_true")
parser.add_argument('-y', '--yara', help='Path to YARA DB to use to scan binary')
parser.add_argument('-viv', '--vivisect', help='Path to vivisect installation')
# create a configuration holder
configuration = Configuration()
# Start the fun part :)
args = parser.parse_args()
# if force flag is defined, change behaviour
if args.force:
configuration.force = True
# set YARA DB signature
if args.yara:
if os.path.isfile(args.yara):
configuration.signatures = args.yara
else:
print "ERROR: %s not found!" % args.yara
exit()
# TEST - save configuration for re-use
#configuration.save()
configuration.load()
# set Vivisect path and Initialize
# currently only vivisect is supported
    # this code needs to be changed if other libraries get supported later
if args.vivisect:
if os.path.isdir(args.vivisect):
sys.path.append(args.vivisect)
else:
print "ERROR: %s not found!" % args.vivisect
exit()
    # Check that a binary to analyze was provided
binary = None
if args.binary:
if os.path.isfile(args.binary):
binary = args.binary
start_analysis(binary, configuration)
else:
print "You need to specify a file to analyze"
exit()
if __name__ == "__main__":
main()
# --------------------------------------------------------------------------- #
# That's all folk ;)
# --------------------------------------------------------------------------- # | en | 0.58471 | #!/usr/bin/python # # This tool is an attempt to automate some taks related # to malware unpacking. # # Most (if not all) of the tricks used in this tool # directly comes from an excellent course given # by <NAME> (@nicolasbrulez) # # Tool developped by David DURVAUX for Autopsit # (commercial brand of N-Labs sprl) # # TODO # - everything # - VirusTotal Support # - dynamic analysis (GDB? Valgring?) # - static code analysis with Radare2 # - add argument for PEID # - save status / restore (config/analysis) # - extract fucnction without offset for comparison of samples # - .. # # # Imports required by this tool # Imports part of this tool # --------------------------------------------------------------------------- # # REPRESENTATION OF THE CONFIGURATION # --------------------------------------------------------------------------- # # force to redo all the analysis # static analysis module # dynamic analysis module # DB downloaded on # https://raw.githubusercontent.com/viper-framework/viper/master/data/peid/UserDB.TXT (UPX not detected) # https://raw.githubusercontent.com/ynadji/peid/master/userdb.txt (problems) # http://blog.didierstevens.com/programs/yara-rules/ # write configuration to file # read configuration from file # update internal state # --------------------------------------------------------------------------- # # REPRESENTATION OF THE INFO RETRIEVED # --------------------------------------------------------------------------- # This class will represent and hold all the information retrieved from the binary # current packed score # number of test done # breakoint to set for unpacking #TODO IMPLEMENT # --------------------------------------------------------------------------- # # STATIC ANALYSIS OF BINARY # --------------------------------------------------------------------------- # Tools to analyze statically binaries @TODO: define access to page_size, margin, entropy_threshold and packed_score # class variable # other: check https://msdn.microsoft.com/en-us/library/ms809762.aspx binary the path to the binary to analyze # set parameters # instanciate internal objects # keep track of the current configuration # initialize static analysis module (TODO - add support for others) # update BinaryInformation with current settings: # CHECK BINARY SECTIONS TODO: mutliple output support, number of test Need to Add: - check section names - check where entry point is located (in the last section) - first section should be writeable - last section should be executable - ... # check number of sections # check section + boundary and see if it matches # check flags # check if section is executable + writeable # check sections sizes (incl. page alignment) # the rsize need to be written in a multiple of memory page size (min 1.) # a margin is added (could be customized) # check entropy # update bininfo status # 3 tests are done for each section Use set of YARA rules to search for known packers TODO - add a check on signature presence or download or end - postpone initialization of signatures DB here!! Do a graph search in the code for leaf nodes ! need to take in account offset in memory ! 
-- CODE TO REMOVE -- DEPRECATED -- # -- BEGIN TEST CODE -- #print "0x%08x (%02x) %-20s %s" % (i[0], i[1], i[3], i[2]) # -- END TEST CODE -- # --------------------------------------------------------------------------- # # MAIN SECTION OF CODE # --------------------------------------------------------------------------- # #TEST #sa.decompile() # TEST # Argument definition # create a configuration holder # Start the fun part :) # if force flag is defined, change behaviour # set YARA DB signature # TEST - save configuration for re-use #configuration.save() # set Vivisect path and Initialize # currently only vivisect is supported # this code need to be changed if other libraries get supported later # Check if an output directory is set # --------------------------------------------------------------------------- # # That's all folk ;) # --------------------------------------------------------------------------- # | 1.971655 | 2 |
mjml/elements/head/mj_style.py | ESA-CCI-ODP/mjml-stub | 23 | 10412 |
from ._head_base import HeadComponent
__all__ = ['MjStyle']
class MjStyle(HeadComponent):
@classmethod
def default_attrs(cls):
return {
'inline' : '',
}
def handler(self):
add = self.context['add']
inline_attr = 'inlineStyle' if (self.get_attr('inline') == 'inline') else 'style'
if inline_attr == 'inlineStyle':
raise NotImplementedError('style inlining not supported yet')
add(inline_attr, self.getContent())
|
from ._head_base import HeadComponent
__all__ = ['MjStyle']
class MjStyle(HeadComponent):
@classmethod
def default_attrs(cls):
return {
'inline' : '',
}
def handler(self):
add = self.context['add']
inline_attr = 'inlineStyle' if (self.get_attr('inline') == 'inline') else 'style'
if inline_attr == 'inlineStyle':
raise NotImplementedError('style inlining not supported yet')
add(inline_attr, self.getContent())
| none | 1 | 2.414489 | 2 |
|
model_zoo/official/nlp/bert/src/sample_process.py | i4oolish/mindspore | 2 | 10413 | <filename>model_zoo/official/nlp/bert/src/sample_process.py
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""process txt"""
import re
import json
def process_one_example_p(tokenizer, text, max_seq_len=128):
"""process one testline"""
textlist = list(text)
tokens = []
for _, word in enumerate(textlist):
token = tokenizer.tokenize(word)
tokens.extend(token)
if len(tokens) >= max_seq_len - 1:
tokens = tokens[0:(max_seq_len - 2)]
ntokens = []
segment_ids = []
label_ids = []
ntokens.append("[CLS]")
segment_ids.append(0)
for _, token in enumerate(tokens):
ntokens.append(token)
segment_ids.append(0)
ntokens.append("[SEP]")
segment_ids.append(0)
input_ids = tokenizer.convert_tokens_to_ids(ntokens)
input_mask = [1] * len(input_ids)
while len(input_ids) < max_seq_len:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
label_ids.append(0)
ntokens.append("**NULL**")
assert len(input_ids) == max_seq_len
assert len(input_mask) == max_seq_len
assert len(segment_ids) == max_seq_len
feature = (input_ids, input_mask, segment_ids)
return feature
def label_generation(text="", probs=None, label2id_file=""):
"""generate label"""
data = [text]
probs = [probs]
result = []
label2id = json.loads(open(label2id_file).read())
id2label = [k for k, v in label2id.items()]
for index, prob in enumerate(probs):
for v in prob[1:len(data[index]) + 1]:
result.append(id2label[int(v)])
labels = {}
start = None
index = 0
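    # The loop below assumes BIO/BIOES-style tags: a tag starting with B or S opens
    # a new span (closing any open one), an O tag closes the open span, and spans
    # are stored as {label: {entity_text: [[start, end]]}} with inclusive indices.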
for _, t in zip("".join(data), result):
if re.search("^[BS]", t):
if start is not None:
label = result[index - 1][2:]
if labels.get(label):
te_ = text[start:index]
labels[label][te_] = [[start, index - 1]]
else:
te_ = text[start:index]
labels[label] = {te_: [[start, index - 1]]}
start = index
if re.search("^O", t):
if start is not None:
label = result[index - 1][2:]
if labels.get(label):
te_ = text[start:index]
labels[label][te_] = [[start, index - 1]]
else:
te_ = text[start:index]
labels[label] = {te_: [[start, index - 1]]}
start = None
index += 1
if start is not None:
label = result[start][2:]
if labels.get(label):
te_ = text[start:index]
labels[label][te_] = [[start, index - 1]]
else:
te_ = text[start:index]
labels[label] = {te_: [[start, index - 1]]}
return labels
| <filename>model_zoo/official/nlp/bert/src/sample_process.py
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""process txt"""
import re
import json
def process_one_example_p(tokenizer, text, max_seq_len=128):
"""process one testline"""
textlist = list(text)
tokens = []
for _, word in enumerate(textlist):
token = tokenizer.tokenize(word)
tokens.extend(token)
if len(tokens) >= max_seq_len - 1:
tokens = tokens[0:(max_seq_len - 2)]
ntokens = []
segment_ids = []
label_ids = []
ntokens.append("[CLS]")
segment_ids.append(0)
for _, token in enumerate(tokens):
ntokens.append(token)
segment_ids.append(0)
ntokens.append("[SEP]")
segment_ids.append(0)
input_ids = tokenizer.convert_tokens_to_ids(ntokens)
input_mask = [1] * len(input_ids)
while len(input_ids) < max_seq_len:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
label_ids.append(0)
ntokens.append("**NULL**")
assert len(input_ids) == max_seq_len
assert len(input_mask) == max_seq_len
assert len(segment_ids) == max_seq_len
feature = (input_ids, input_mask, segment_ids)
return feature
def label_generation(text="", probs=None, label2id_file=""):
"""generate label"""
data = [text]
probs = [probs]
result = []
label2id = json.loads(open(label2id_file).read())
id2label = [k for k, v in label2id.items()]
for index, prob in enumerate(probs):
for v in prob[1:len(data[index]) + 1]:
result.append(id2label[int(v)])
labels = {}
start = None
index = 0
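    # The loop below assumes BIO/BIOES-style tags: a tag starting with B or S opens
    # a new span (closing any open one), an O tag closes the open span, and spans
    # are stored as {label: {entity_text: [[start, end]]}} with inclusive indices.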
for _, t in zip("".join(data), result):
if re.search("^[BS]", t):
if start is not None:
label = result[index - 1][2:]
if labels.get(label):
te_ = text[start:index]
labels[label][te_] = [[start, index - 1]]
else:
te_ = text[start:index]
labels[label] = {te_: [[start, index - 1]]}
start = index
if re.search("^O", t):
if start is not None:
label = result[index - 1][2:]
if labels.get(label):
te_ = text[start:index]
labels[label][te_] = [[start, index - 1]]
else:
te_ = text[start:index]
labels[label] = {te_: [[start, index - 1]]}
start = None
index += 1
if start is not None:
label = result[start][2:]
if labels.get(label):
te_ = text[start:index]
labels[label][te_] = [[start, index - 1]]
else:
te_ = text[start:index]
labels[label] = {te_: [[start, index - 1]]}
return labels
| en | 0.796274 | # Copyright 2020 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ process txt process one testline generate label | 2.58232 | 3 |
lang_model/data_loader.py | alex44jzy/FancyALMLDLNLP | 0 | 10414 | <gh_stars>0
import torch
from torch.nn import functional as F
from torch.utils.data import Dataset
from gensim.corpora.dictionary import Dictionary
class LangDataset(Dataset):
def __init__(self, src_sents, trg_sents, max_len=-1):
self.src_sents = src_sents
self.trg_sents = trg_sents
# Create the vocabulary for both the source and target.
self.vocab = Dictionary(src_sents + trg_sents)
# Patch the vocabularies and add the <pad> and <unk> symbols.
special_tokens = {'<pad>': 0, '<unk>': 1, '</s>': 2}
self.vocab.patch_with_special_tokens(special_tokens)
# Keep track of how many data points.
self._len = len(src_sents)
if max_len < 0:
# If it's not set, find the longest text in the data.
max_src_len = max(len(sent) for sent in src_sents)
self.max_len = max_src_len
else:
self.max_len = max_len
def pad_sequence(self, vectorized_sent, max_len):
# To pad the sentence:
# Pad left = 0; Pad right = max_len - len of sent.
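        # Illustrative (assumed) example: a 5-token sentence with max_len=8 becomes
        # F.pad(vec, (0, 3), 'constant'), i.e. three trailing 0s -- the <pad> index.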
pad_dim = (0, max_len - len(vectorized_sent))
return F.pad(vectorized_sent, pad_dim, 'constant')
def __getitem__(self, index):
vectorized_src = self.vectorize(self.vocab, self.src_sents[index])
vectorized_trg = self.vectorize(self.vocab, self.trg_sents[index])
return {'x': self.pad_sequence(vectorized_src, self.max_len),
'y': self.pad_sequence(vectorized_trg, self.max_len),
'x_len': len(vectorized_src),
'y_len': len(vectorized_trg)}
def __len__(self):
return self._len
def vectorize(self, vocab, tokens):
"""
:param tokens: Tokens that should be vectorized.
:type tokens: list(str)
"""
# See https://radimrehurek.com/gensim/corpora/dictionary.html#gensim.corpora.dictionary.Dictionary.doc2idx
# Lets just cast list of indices into torch tensors directly =)
return torch.tensor(vocab.doc2idx(tokens, unknown_word_index=1))
def unvectorize(self, vocab, indices):
"""
:param indices: Converts the indices back to tokens.
        :type indices: list(int)
"""
return [vocab[i] for i in indices]
| import torch
from torch.nn import functional as F
from torch.utils.data import Dataset
from gensim.corpora.dictionary import Dictionary
class LangDataset(Dataset):
def __init__(self, src_sents, trg_sents, max_len=-1):
self.src_sents = src_sents
self.trg_sents = trg_sents
# Create the vocabulary for both the source and target.
self.vocab = Dictionary(src_sents + trg_sents)
# Patch the vocabularies and add the <pad> and <unk> symbols.
special_tokens = {'<pad>': 0, '<unk>': 1, '</s>': 2}
self.vocab.patch_with_special_tokens(special_tokens)
# Keep track of how many data points.
self._len = len(src_sents)
if max_len < 0:
# If it's not set, find the longest text in the data.
max_src_len = max(len(sent) for sent in src_sents)
self.max_len = max_src_len
else:
self.max_len = max_len
def pad_sequence(self, vectorized_sent, max_len):
# To pad the sentence:
# Pad left = 0; Pad right = max_len - len of sent.
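        # Illustrative (assumed) example: a 5-token sentence with max_len=8 becomes
        # F.pad(vec, (0, 3), 'constant'), i.e. three trailing 0s -- the <pad> index.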
pad_dim = (0, max_len - len(vectorized_sent))
return F.pad(vectorized_sent, pad_dim, 'constant')
def __getitem__(self, index):
vectorized_src = self.vectorize(self.vocab, self.src_sents[index])
vectorized_trg = self.vectorize(self.vocab, self.trg_sents[index])
return {'x': self.pad_sequence(vectorized_src, self.max_len),
'y': self.pad_sequence(vectorized_trg, self.max_len),
'x_len': len(vectorized_src),
'y_len': len(vectorized_trg)}
def __len__(self):
return self._len
def vectorize(self, vocab, tokens):
"""
:param tokens: Tokens that should be vectorized.
:type tokens: list(str)
"""
# See https://radimrehurek.com/gensim/corpora/dictionary.html#gensim.corpora.dictionary.Dictionary.doc2idx
# Lets just cast list of indices into torch tensors directly =)
return torch.tensor(vocab.doc2idx(tokens, unknown_word_index=1))
def unvectorize(self, vocab, indices):
"""
:param indices: Converts the indices back to tokens.
        :type indices: list(int)
"""
return [vocab[i] for i in indices] | en | 0.718332 | # Create the vocabulary for both the source and target. # Patch the vocabularies and add the <pad> and <unk> symbols. # Keep track of how many data points. # If it's not set, find the longest text in the data. # To pad the sentence: # Pad left = 0; Pad right = max_len - len of sent. :param tokens: Tokens that should be vectorized. :type tokens: list(str) # See https://radimrehurek.com/gensim/corpora/dictionary.html#gensim.corpora.dictionary.Dictionary.doc2idx # Lets just cast list of indices into torch tensors directly =) :param indices: Converts the indices back to tokens. :type tokens: list(int) | 2.814436 | 3 |
models_nonconvex_simple2/ndcc13persp.py | grossmann-group/pyomo-MINLP-benchmarking | 0 | 10415 | <reponame>grossmann-group/pyomo-MINLP-benchmarking
# MINLP written by GAMS Convert at 08/20/20 01:30:45
#
# Equation counts
# Total E G L N X C B
# 297 170 42 85 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 673 631 42 0 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 2479 2353 126 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()
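# Hypothetical usage (not part of the GAMS-converted output): once the variables,
# objective and constraints below are declared, the instance can be handed to any
# MINLP solver registered with Pyomo, e.g.
#   SolverFactory('couenne').solve(m, tee=True)   # assumes a local Couenne install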
m.x1 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x3 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x5 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x6 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x7 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x8 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x9 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x10 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x11 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x12 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x13 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x14 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x15 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x16 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x17 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x18 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x19 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x20 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x21 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x22 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x23 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x24 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x25 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x26 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x27 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x28 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x29 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x30 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x31 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x32 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x33 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x34 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x35 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x36 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x37 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x38 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x39 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x40 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x41 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x42 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x43 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x44 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x45 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x46 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x47 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x48 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x49 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x50 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x51 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x52 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x53 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x54 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x55 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x56 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x57 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x58 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x59 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x60 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x61 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x62 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x63 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x64 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x65 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x66 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x67 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x68 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x69 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x70 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x71 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x72 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x73 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x74 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x75 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x76 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x77 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x78 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x79 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x80 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x81 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x82 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x83 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x84 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x85 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x86 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x87 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x88 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x89 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x90 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x91 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x92 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x93 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x94 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x95 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x96 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x97 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x98 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x99 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x100 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x101 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x102 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x103 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x104 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x105 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x106 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x107 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x108 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x109 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x110 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x111 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x112 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x113 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x114 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x115 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x116 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x117 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x118 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x119 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x120 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x121 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x122 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x123 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x124 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x125 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x126 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x127 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x128 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x129 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x130 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x131 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x132 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x133 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x134 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x135 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x136 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x137 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x138 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x139 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x140 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x141 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x142 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x143 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x144 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x145 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x146 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x147 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x148 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x149 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x150 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x151 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x152 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x153 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x154 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x155 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x156 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x157 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x158 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x159 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x160 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x161 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x162 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x163 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x164 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x165 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x166 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x167 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x168 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x169 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x170 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x171 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x172 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x173 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x174 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x175 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x176 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x177 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x178 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x179 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x180 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x181 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x182 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x183 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x184 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x185 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x186 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x187 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x188 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x189 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x190 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x191 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x192 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x193 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x194 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x195 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x196 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x197 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x198 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x199 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x200 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x201 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x202 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x203 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x204 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x205 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x206 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x207 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x208 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x209 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x210 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x211 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x212 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x213 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x214 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x215 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x216 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x217 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x218 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x219 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x220 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x221 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x222 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x223 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x224 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x225 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x226 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x227 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x228 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x229 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x230 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x231 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x232 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x233 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x234 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x235 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x236 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x237 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x238 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x239 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x240 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x241 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x242 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x243 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x244 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x245 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x246 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x247 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x248 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x249 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x250 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x251 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x252 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x253 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x254 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x255 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x256 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x257 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x258 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x259 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x260 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x261 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x262 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x263 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x264 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x265 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x266 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x267 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x268 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x269 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x270 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x271 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x272 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x273 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x274 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x275 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x276 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x277 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x278 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x279 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x280 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x281 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x282 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x283 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x284 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x285 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x286 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x287 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x288 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x289 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x290 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x291 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x292 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x293 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x294 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x295 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x296 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x297 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x298 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x299 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x300 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x301 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x302 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x303 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x304 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x305 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x306 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x307 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x308 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x309 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x310 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x311 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x312 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x313 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x314 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x315 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x316 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x317 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x318 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x319 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x320 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x321 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x322 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x323 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x324 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x325 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x326 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x327 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x328 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x329 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x330 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x331 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x332 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x333 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x334 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x335 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x336 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x337 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x338 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x339 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x340 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x341 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x342 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x343 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x344 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x345 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x346 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x347 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x348 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x349 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x350 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x351 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x352 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x353 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x354 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x355 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x356 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x357 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x358 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x359 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x360 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x361 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x362 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x363 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x364 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x365 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x366 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x367 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x368 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x369 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x370 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x371 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x372 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x373 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x374 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x375 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x376 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x377 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x378 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x379 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x380 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x381 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x382 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x383 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x384 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x385 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x386 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x387 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x388 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x389 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x390 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x391 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x392 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x393 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x394 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x395 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x396 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x397 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x398 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x399 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x400 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x401 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x402 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x403 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x404 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x405 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x406 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x407 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x408 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x409 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x410 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x411 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x412 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x413 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x414 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x415 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x416 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x417 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x418 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x419 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x420 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x421 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x422 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x423 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x424 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x425 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x426 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x427 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x428 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x429 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x430 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x431 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x432 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x433 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x434 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x435 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x436 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x437 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x438 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x439 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x440 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x441 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x442 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x443 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x444 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x445 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x446 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x447 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x448 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x449 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x450 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x451 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x452 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x453 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x454 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x455 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x456 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x457 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x458 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x459 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x460 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x461 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x462 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x463 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x464 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x465 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x466 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x467 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x468 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x469 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x470 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x471 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x472 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x473 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x474 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x475 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x476 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x477 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x478 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x479 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x480 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x481 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x482 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x483 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x484 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x485 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x486 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x487 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x488 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x489 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x490 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x491 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x492 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x493 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x494 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x495 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x496 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x497 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x498 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x499 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x500 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x501 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x502 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x503 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x504 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x505 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x506 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x507 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x508 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x509 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x510 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x511 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x512 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x513 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x514 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x515 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x516 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x517 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x518 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x519 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x520 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x521 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x522 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x523 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x524 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x525 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x526 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x527 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x528 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x529 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x530 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x531 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x532 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x533 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x534 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x535 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x536 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x537 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x538 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x539 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x540 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x541 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x542 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x543 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x544 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x545 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x546 = Var(within=Reals,bounds=(0,None),initialize=0)
m.b547 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b548 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b549 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b550 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b551 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b552 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b553 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b554 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b555 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b556 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b557 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b558 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b559 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b560 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b561 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b562 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b563 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b564 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b565 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b566 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b567 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b568 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b569 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b570 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b571 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b572 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b573 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b574 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b575 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b576 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b577 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b578 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b579 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b580 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b581 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b582 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b583 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b584 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b585 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b586 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b587 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b588 = Var(within=Binary,bounds=(0,1),initialize=0)
m.x589 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x590 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x591 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x592 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x593 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x594 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x595 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x596 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x597 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x598 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x599 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x600 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x601 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x602 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x603 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x604 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x605 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x606 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x607 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x608 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x609 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x610 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x611 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x612 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x613 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x614 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x615 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x616 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x617 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x618 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x619 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x620 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x621 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x622 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x623 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x624 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x625 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x626 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x627 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x628 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x629 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x630 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x632 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x633 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x634 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x635 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x636 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x637 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x638 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x639 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x640 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x641 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x642 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x643 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x644 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x645 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x646 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x647 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x648 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x649 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x650 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x651 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x652 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x653 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x654 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x655 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x656 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x657 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x658 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x659 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x660 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x661 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x662 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x663 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x664 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x665 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x666 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x667 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x668 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x669 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x670 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x671 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x672 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x673 = Var(within=Reals,bounds=(0,None),initialize=0)
m.obj = Objective(expr= 1.090016011*m.b547 + 3.10674202*m.b548 + 2.475702586*m.b549 + 1.966733944*m.b550
+ 1.090016011*m.b551 + 2.019536713*m.b552 + 3.10674202*m.b553 + 1.383540955*m.b554
+ 2.087059045*m.b555 + 3.720443668*m.b556 + 1.383540955*m.b557 + 1.794144217*m.b558
+ 3.50653318*m.b559 + 1.71812596*m.b560 + 3.834780538*m.b561 + 2.087059045*m.b562
+ 1.794144217*m.b563 + 2.239621249*m.b564 + 2.475702586*m.b565 + 2.019536713*m.b566
+ 3.720443668*m.b567 + 3.50653318*m.b568 + 2.239621249*m.b569 + 1.098732406*m.b570
+ 1.742557876*m.b571 + 1.098732406*m.b572 + 3.606882982*m.b573 + 1.71812596*m.b574
+ 2.074958698*m.b575 + 1.966733944*m.b576 + 2.074958698*m.b577 + 3.859970515*m.b578
+ 1.742557876*m.b579 + 3.859970515*m.b580 + 3.951460459*m.b581 + 3.834780538*m.b582
+ 3.606882982*m.b583 + 2.524064089*m.b584 + 2.524064089*m.b585 + 3.982701487*m.b586
+ 3.951460459*m.b587 + 3.982701487*m.b588, sense=minimize)
m.c2 = Constraint(expr= - m.x1 - m.x14 - m.x27 - m.x40 + m.x53 + m.x79 + m.x235 + m.x378 == -148)
m.c3 = Constraint(expr= - m.x2 - m.x15 - m.x28 - m.x41 + m.x54 + m.x80 + m.x236 + m.x379 == 12)
m.c4 = Constraint(expr= - m.x3 - m.x16 - m.x29 - m.x42 + m.x55 + m.x81 + m.x237 + m.x380 == 16)
m.c5 = Constraint(expr= - m.x4 - m.x17 - m.x30 - m.x43 + m.x56 + m.x82 + m.x238 + m.x381 == 21)
m.c6 = Constraint(expr= - m.x5 - m.x18 - m.x31 - m.x44 + m.x57 + m.x83 + m.x239 + m.x382 == 11)
m.c7 = Constraint(expr= - m.x6 - m.x19 - m.x32 - m.x45 + m.x58 + m.x84 + m.x240 + m.x383 == 24)
m.c8 = Constraint(expr= - m.x7 - m.x20 - m.x33 - m.x46 + m.x59 + m.x85 + m.x241 + m.x384 == 24)
m.c9 = Constraint(expr= - m.x8 - m.x21 - m.x34 - m.x47 + m.x60 + m.x86 + m.x242 + m.x385 == 8)
m.c10 = Constraint(expr= - m.x9 - m.x22 - m.x35 - m.x48 + m.x61 + m.x87 + m.x243 + m.x386 == 10)
m.c11 = Constraint(expr= - m.x10 - m.x23 - m.x36 - m.x49 + m.x62 + m.x88 + m.x244 + m.x387 == 18)
m.c12 = Constraint(expr= - m.x11 - m.x24 - m.x37 - m.x50 + m.x63 + m.x89 + m.x245 + m.x388 == 11)
m.c13 = Constraint(expr= - m.x12 - m.x25 - m.x38 - m.x51 + m.x64 + m.x90 + m.x246 + m.x389 == 20)
m.c14 = Constraint(expr= - m.x13 - m.x26 - m.x39 - m.x52 + m.x65 + m.x91 + m.x247 + m.x390 == 7)
m.c15 = Constraint(expr= m.x1 - m.x53 - m.x66 + m.x248 == 7)
m.c16 = Constraint(expr= m.x2 - m.x54 - m.x67 + m.x249 == -175)
m.c17 = Constraint(expr= m.x3 - m.x55 - m.x68 + m.x250 == 15)
m.c18 = Constraint(expr= m.x4 - m.x56 - m.x69 + m.x251 == 17)
m.c19 = Constraint(expr= m.x5 - m.x57 - m.x70 + m.x252 == 20)
m.c20 = Constraint(expr= m.x6 - m.x58 - m.x71 + m.x253 == 24)
m.c21 = Constraint(expr= m.x7 - m.x59 - m.x72 + m.x254 == 6)
m.c22 = Constraint(expr= m.x8 - m.x60 - m.x73 + m.x255 == 19)
m.c23 = Constraint(expr= m.x9 - m.x61 - m.x74 + m.x256 == 24)
m.c24 = Constraint(expr= m.x10 - m.x62 - m.x75 + m.x257 == 11)
m.c25 = Constraint(expr= m.x11 - m.x63 - m.x76 + m.x258 == 15)
m.c26 = Constraint(expr= m.x12 - m.x64 - m.x77 + m.x259 == 9)
m.c27 = Constraint(expr= m.x13 - m.x65 - m.x78 + m.x260 == 19)
m.c28 = Constraint(expr= m.x14 - m.x79 - m.x92 - m.x105 - m.x118 + m.x131 + m.x196 + m.x261 == 15)
m.c29 = Constraint(expr= m.x15 - m.x80 - m.x93 - m.x106 - m.x119 + m.x132 + m.x197 + m.x262 == 13)
m.c30 = Constraint(expr= m.x16 - m.x81 - m.x94 - m.x107 - m.x120 + m.x133 + m.x198 + m.x263 == -231)
m.c31 = Constraint(expr= m.x17 - m.x82 - m.x95 - m.x108 - m.x121 + m.x134 + m.x199 + m.x264 == 23)
m.c32 = Constraint(expr= m.x18 - m.x83 - m.x96 - m.x109 - m.x122 + m.x135 + m.x200 + m.x265 == 18)
m.c33 = Constraint(expr= m.x19 - m.x84 - m.x97 - m.x110 - m.x123 + m.x136 + m.x201 + m.x266 == 19)
m.c34 = Constraint(expr= m.x20 - m.x85 - m.x98 - m.x111 - m.x124 + m.x137 + m.x202 + m.x267 == 9)
m.c35 = Constraint(expr= m.x21 - m.x86 - m.x99 - m.x112 - m.x125 + m.x138 + m.x203 + m.x268 == 8)
m.c36 = Constraint(expr= m.x22 - m.x87 - m.x100 - m.x113 - m.x126 + m.x139 + m.x204 + m.x269 == 16)
m.c37 = Constraint(expr= m.x23 - m.x88 - m.x101 - m.x114 - m.x127 + m.x140 + m.x205 + m.x270 == 19)
m.c38 = Constraint(expr= m.x24 - m.x89 - m.x102 - m.x115 - m.x128 + m.x141 + m.x206 + m.x271 == 19)
m.c39 = Constraint(expr= m.x25 - m.x90 - m.x103 - m.x116 - m.x129 + m.x142 + m.x207 + m.x272 == 21)
m.c40 = Constraint(expr= m.x26 - m.x91 - m.x104 - m.x117 - m.x130 + m.x143 + m.x208 + m.x273 == 8)
m.c41 = Constraint(expr= m.x92 - m.x131 - m.x144 - m.x157 - m.x170 - m.x183 + m.x209 + m.x274 + m.x352 + m.x456 == 12)
m.c42 = Constraint(expr= m.x93 - m.x132 - m.x145 - m.x158 - m.x171 - m.x184 + m.x210 + m.x275 + m.x353 + m.x457 == 20)
m.c43 = Constraint(expr= m.x94 - m.x133 - m.x146 - m.x159 - m.x172 - m.x185 + m.x211 + m.x276 + m.x354 + m.x458 == 23)
m.c44 = Constraint(expr= m.x95 - m.x134 - m.x147 - m.x160 - m.x173 - m.x186 + m.x212 + m.x277 + m.x355 + m.x459
== -187)
m.c45 = Constraint(expr= m.x96 - m.x135 - m.x148 - m.x161 - m.x174 - m.x187 + m.x213 + m.x278 + m.x356 + m.x460 == 21)
m.c46 = Constraint(expr= m.x97 - m.x136 - m.x149 - m.x162 - m.x175 - m.x188 + m.x214 + m.x279 + m.x357 + m.x461 == 12)
m.c47 = Constraint(expr= m.x98 - m.x137 - m.x150 - m.x163 - m.x176 - m.x189 + m.x215 + m.x280 + m.x358 + m.x462 == 6)
m.c48 = Constraint(expr= m.x99 - m.x138 - m.x151 - m.x164 - m.x177 - m.x190 + m.x216 + m.x281 + m.x359 + m.x463 == 11)
m.c49 = Constraint(expr= m.x100 - m.x139 - m.x152 - m.x165 - m.x178 - m.x191 + m.x217 + m.x282 + m.x360 + m.x464
== 19)
m.c50 = Constraint(expr= m.x101 - m.x140 - m.x153 - m.x166 - m.x179 - m.x192 + m.x218 + m.x283 + m.x361 + m.x465 == 9)
m.c51 = Constraint(expr= m.x102 - m.x141 - m.x154 - m.x167 - m.x180 - m.x193 + m.x219 + m.x284 + m.x362 + m.x466
== 17)
m.c52 = Constraint(expr= m.x103 - m.x142 - m.x155 - m.x168 - m.x181 - m.x194 + m.x220 + m.x285 + m.x363 + m.x467
== 23)
m.c53 = Constraint(expr= m.x104 - m.x143 - m.x156 - m.x169 - m.x182 - m.x195 + m.x221 + m.x286 + m.x364 + m.x468
== 21)
m.c54 = Constraint(expr= m.x105 + m.x144 - m.x196 - m.x209 - m.x222 + m.x287 == 14)
m.c55 = Constraint(expr= m.x106 + m.x145 - m.x197 - m.x210 - m.x223 + m.x288 == 7)
m.c56 = Constraint(expr= m.x107 + m.x146 - m.x198 - m.x211 - m.x224 + m.x289 == 22)
m.c57 = Constraint(expr= m.x108 + m.x147 - m.x199 - m.x212 - m.x225 + m.x290 == 14)
m.c58 = Constraint(expr= m.x109 + m.x148 - m.x200 - m.x213 - m.x226 + m.x291 == -170)
m.c59 = Constraint(expr= m.x110 + m.x149 - m.x201 - m.x214 - m.x227 + m.x292 == 12)
m.c60 = Constraint(expr= m.x111 + m.x150 - m.x202 - m.x215 - m.x228 + m.x293 == 13)
m.c61 = Constraint(expr= m.x112 + m.x151 - m.x203 - m.x216 - m.x229 + m.x294 == 10)
m.c62 = Constraint(expr= m.x113 + m.x152 - m.x204 - m.x217 - m.x230 + m.x295 == 15)
m.c63 = Constraint(expr= m.x114 + m.x153 - m.x205 - m.x218 - m.x231 + m.x296 == 9)
m.c64 = Constraint(expr= m.x115 + m.x154 - m.x206 - m.x219 - m.x232 + m.x297 == 14)
m.c65 = Constraint(expr= m.x116 + m.x155 - m.x207 - m.x220 - m.x233 + m.x298 == 16)
m.c66 = Constraint(expr= m.x117 + m.x156 - m.x208 - m.x221 - m.x234 + m.x299 == 8)
m.c67 = Constraint(expr= m.x27 + m.x66 + m.x118 + m.x157 + m.x222 - m.x235 - m.x248 - m.x261 - m.x274 - m.x287
- m.x300 - m.x313 + m.x326 + m.x417 == 13)
m.c68 = Constraint(expr= m.x28 + m.x67 + m.x119 + m.x158 + m.x223 - m.x236 - m.x249 - m.x262 - m.x275 - m.x288
- m.x301 - m.x314 + m.x327 + m.x418 == 22)
m.c69 = Constraint(expr= m.x29 + m.x68 + m.x120 + m.x159 + m.x224 - m.x237 - m.x250 - m.x263 - m.x276 - m.x289
- m.x302 - m.x315 + m.x328 + m.x419 == 23)
m.c70 = Constraint(expr= m.x30 + m.x69 + m.x121 + m.x160 + m.x225 - m.x238 - m.x251 - m.x264 - m.x277 - m.x290
- m.x303 - m.x316 + m.x329 + m.x420 == 7)
m.c71 = Constraint(expr= m.x31 + m.x70 + m.x122 + m.x161 + m.x226 - m.x239 - m.x252 - m.x265 - m.x278 - m.x291
- m.x304 - m.x317 + m.x330 + m.x421 == 16)
m.c72 = Constraint(expr= m.x32 + m.x71 + m.x123 + m.x162 + m.x227 - m.x240 - m.x253 - m.x266 - m.x279 - m.x292
- m.x305 - m.x318 + m.x331 + m.x422 == -169)
m.c73 = Constraint(expr= m.x33 + m.x72 + m.x124 + m.x163 + m.x228 - m.x241 - m.x254 - m.x267 - m.x280 - m.x293
- m.x306 - m.x319 + m.x332 + m.x423 == 20)
m.c74 = Constraint(expr= m.x34 + m.x73 + m.x125 + m.x164 + m.x229 - m.x242 - m.x255 - m.x268 - m.x281 - m.x294
- m.x307 - m.x320 + m.x333 + m.x424 == 14)
m.c75 = Constraint(expr= m.x35 + m.x74 + m.x126 + m.x165 + m.x230 - m.x243 - m.x256 - m.x269 - m.x282 - m.x295
- m.x308 - m.x321 + m.x334 + m.x425 == 11)
m.c76 = Constraint(expr= m.x36 + m.x75 + m.x127 + m.x166 + m.x231 - m.x244 - m.x257 - m.x270 - m.x283 - m.x296
- m.x309 - m.x322 + m.x335 + m.x426 == 13)
m.c77 = Constraint(expr= m.x37 + m.x76 + m.x128 + m.x167 + m.x232 - m.x245 - m.x258 - m.x271 - m.x284 - m.x297
- m.x310 - m.x323 + m.x336 + m.x427 == 10)
m.c78 = Constraint(expr= m.x38 + m.x77 + m.x129 + m.x168 + m.x233 - m.x246 - m.x259 - m.x272 - m.x285 - m.x298
- m.x311 - m.x324 + m.x337 + m.x428 == 13)
m.c79 = Constraint(expr= m.x39 + m.x78 + m.x130 + m.x169 + m.x234 - m.x247 - m.x260 - m.x273 - m.x286 - m.x299
- m.x312 - m.x325 + m.x338 + m.x429 == 12)
m.c80 = Constraint(expr= m.x300 - m.x326 - m.x339 + m.x469 == 6)
m.c81 = Constraint(expr= m.x301 - m.x327 - m.x340 + m.x470 == 16)
m.c82 = Constraint(expr= m.x302 - m.x328 - m.x341 + m.x471 == 22)
m.c83 = Constraint(expr= m.x303 - m.x329 - m.x342 + m.x472 == 9)
m.c84 = Constraint(expr= m.x304 - m.x330 - m.x343 + m.x473 == 13)
m.c85 = Constraint(expr= m.x305 - m.x331 - m.x344 + m.x474 == 7)
m.c86 = Constraint(expr= m.x306 - m.x332 - m.x345 + m.x475 == -156)
m.c87 = Constraint(expr= m.x307 - m.x333 - m.x346 + m.x476 == 20)
m.c88 = Constraint(expr= m.x308 - m.x334 - m.x347 + m.x477 == 19)
m.c89 = Constraint(expr= m.x309 - m.x335 - m.x348 + m.x478 == 24)
m.c90 = Constraint(expr= m.x310 - m.x336 - m.x349 + m.x479 == 8)
m.c91 = Constraint(expr= m.x311 - m.x337 - m.x350 + m.x480 == 21)
m.c92 = Constraint(expr= m.x312 - m.x338 - m.x351 + m.x481 == 6)
m.c93 = Constraint(expr= m.x170 - m.x352 - m.x365 + m.x391 == 15)
m.c94 = Constraint(expr= m.x171 - m.x353 - m.x366 + m.x392 == 15)
m.c95 = Constraint(expr= m.x172 - m.x354 - m.x367 + m.x393 == 23)
m.c96 = Constraint(expr= m.x173 - m.x355 - m.x368 + m.x394 == 25)
m.c97 = Constraint(expr= m.x174 - m.x356 - m.x369 + m.x395 == 20)
m.c98 = Constraint(expr= m.x175 - m.x357 - m.x370 + m.x396 == 7)
m.c99 = Constraint(expr= m.x176 - m.x358 - m.x371 + m.x397 == 19)
m.c100 = Constraint(expr= m.x177 - m.x359 - m.x372 + m.x398 == -177)
m.c101 = Constraint(expr= m.x178 - m.x360 - m.x373 + m.x399 == 7)
m.c102 = Constraint(expr= m.x179 - m.x361 - m.x374 + m.x400 == 18)
m.c103 = Constraint(expr= m.x180 - m.x362 - m.x375 + m.x401 == 25)
m.c104 = Constraint(expr= m.x181 - m.x363 - m.x376 + m.x402 == 20)
m.c105 = Constraint(expr= m.x182 - m.x364 - m.x377 + m.x403 == 18)
m.c106 = Constraint(expr= m.x40 + m.x365 - m.x378 - m.x391 - m.x404 + m.x430 == 8)
m.c107 = Constraint(expr= m.x41 + m.x366 - m.x379 - m.x392 - m.x405 + m.x431 == 11)
m.c108 = Constraint(expr= m.x42 + m.x367 - m.x380 - m.x393 - m.x406 + m.x432 == 23)
m.c109 = Constraint(expr= m.x43 + m.x368 - m.x381 - m.x394 - m.x407 + m.x433 == 7)
m.c110 = Constraint(expr= m.x44 + m.x369 - m.x382 - m.x395 - m.x408 + m.x434 == 5)
m.c111 = Constraint(expr= m.x45 + m.x370 - m.x383 - m.x396 - m.x409 + m.x435 == 15)
m.c112 = Constraint(expr= m.x46 + m.x371 - m.x384 - m.x397 - m.x410 + m.x436 == 7)
m.c113 = Constraint(expr= m.x47 + m.x372 - m.x385 - m.x398 - m.x411 + m.x437 == 10)
m.c114 = Constraint(expr= m.x48 + m.x373 - m.x386 - m.x399 - m.x412 + m.x438 == -179)
m.c115 = Constraint(expr= m.x49 + m.x374 - m.x387 - m.x400 - m.x413 + m.x439 == 20)
m.c116 = Constraint(expr= m.x50 + m.x375 - m.x388 - m.x401 - m.x414 + m.x440 == 18)
m.c117 = Constraint(expr= m.x51 + m.x376 - m.x389 - m.x402 - m.x415 + m.x441 == 8)
m.c118 = Constraint(expr= m.x52 + m.x377 - m.x390 - m.x403 - m.x416 + m.x442 == 12)
m.c119 = Constraint(expr= m.x313 + m.x404 - m.x417 - m.x430 - m.x443 + m.x521 == 9)
m.c120 = Constraint(expr= m.x314 + m.x405 - m.x418 - m.x431 - m.x444 + m.x522 == 12)
m.c121 = Constraint(expr= m.x315 + m.x406 - m.x419 - m.x432 - m.x445 + m.x523 == 24)
m.c122 = Constraint(expr= m.x316 + m.x407 - m.x420 - m.x433 - m.x446 + m.x524 == 21)
m.c123 = Constraint(expr= m.x317 + m.x408 - m.x421 - m.x434 - m.x447 + m.x525 == 8)
m.c124 = Constraint(expr= m.x318 + m.x409 - m.x422 - m.x435 - m.x448 + m.x526 == 9)
m.c125 = Constraint(expr= m.x319 + m.x410 - m.x423 - m.x436 - m.x449 + m.x527 == 11)
m.c126 = Constraint(expr= m.x320 + m.x411 - m.x424 - m.x437 - m.x450 + m.x528 == 13)
m.c127 = Constraint(expr= m.x321 + m.x412 - m.x425 - m.x438 - m.x451 + m.x529 == 11)
m.c128 = Constraint(expr= m.x322 + m.x413 - m.x426 - m.x439 - m.x452 + m.x530 == -183)
m.c129 = Constraint(expr= m.x323 + m.x414 - m.x427 - m.x440 - m.x453 + m.x531 == 16)
m.c130 = Constraint(expr= m.x324 + m.x415 - m.x428 - m.x441 - m.x454 + m.x532 == 14)
m.c131 = Constraint(expr= m.x325 + m.x416 - m.x429 - m.x442 - m.x455 + m.x533 == 17)
m.c132 = Constraint(expr= m.x183 + m.x339 - m.x456 - m.x469 - m.x482 + m.x495 == 22)
m.c133 = Constraint(expr= m.x184 + m.x340 - m.x457 - m.x470 - m.x483 + m.x496 == 12)
m.c134 = Constraint(expr= m.x185 + m.x341 - m.x458 - m.x471 - m.x484 + m.x497 == 7)
m.c135 = Constraint(expr= m.x186 + m.x342 - m.x459 - m.x472 - m.x485 + m.x498 == 12)
m.c136 = Constraint(expr= m.x187 + m.x343 - m.x460 - m.x473 - m.x486 + m.x499 == 12)
m.c137 = Constraint(expr= m.x188 + m.x344 - m.x461 - m.x474 - m.x487 + m.x500 == 10)
m.c138 = Constraint(expr= m.x189 + m.x345 - m.x462 - m.x475 - m.x488 + m.x501 == 11)
m.c139 = Constraint(expr= m.x190 + m.x346 - m.x463 - m.x476 - m.x489 + m.x502 == 17)
m.c140 = Constraint(expr= m.x191 + m.x347 - m.x464 - m.x477 - m.x490 + m.x503 == 17)
m.c141 = Constraint(expr= m.x192 + m.x348 - m.x465 - m.x478 - m.x491 + m.x504 == 12)
m.c142 = Constraint(expr= m.x193 + m.x349 - m.x466 - m.x479 - m.x492 + m.x505 == -185)
m.c143 = Constraint(expr= m.x194 + m.x350 - m.x467 - m.x480 - m.x493 + m.x506 == 10)
m.c144 = Constraint(expr= m.x195 + m.x351 - m.x468 - m.x481 - m.x494 + m.x507 == 21)
m.c145 = Constraint(expr= m.x482 - m.x495 - m.x508 + m.x534 == 8)
m.c146 = Constraint(expr= m.x483 - m.x496 - m.x509 + m.x535 == 20)
m.c147 = Constraint(expr= m.x484 - m.x497 - m.x510 + m.x536 == 23)
m.c148 = Constraint(expr= m.x485 - m.x498 - m.x511 + m.x537 == 18)
m.c149 = Constraint(expr= m.x486 - m.x499 - m.x512 + m.x538 == 15)
m.c150 = Constraint(expr= m.x487 - m.x500 - m.x513 + m.x539 == 22)
m.c151 = Constraint(expr= m.x488 - m.x501 - m.x514 + m.x540 == 17)
m.c152 = Constraint(expr= m.x489 - m.x502 - m.x515 + m.x541 == 24)
m.c153 = Constraint(expr= m.x490 - m.x503 - m.x516 + m.x542 == 7)
m.c154 = Constraint(expr= m.x491 - m.x504 - m.x517 + m.x543 == 16)
m.c155 = Constraint(expr= m.x492 - m.x505 - m.x518 + m.x544 == 24)
m.c156 = Constraint(expr= m.x493 - m.x506 - m.x519 + m.x545 == -200)
m.c157 = Constraint(expr= m.x494 - m.x507 - m.x520 + m.x546 == 8)
m.c158 = Constraint(expr= m.x443 + m.x508 - m.x521 - m.x534 == 19)
m.c159 = Constraint(expr= m.x444 + m.x509 - m.x522 - m.x535 == 15)
m.c160 = Constraint(expr= m.x445 + m.x510 - m.x523 - m.x536 == 10)
m.c161 = Constraint(expr= m.x446 + m.x511 - m.x524 - m.x537 == 13)
m.c162 = Constraint(expr= m.x447 + m.x512 - m.x525 - m.x538 == 11)
m.c163 = Constraint(expr= m.x448 + m.x513 - m.x526 - m.x539 == 8)
m.c164 = Constraint(expr= m.x449 + m.x514 - m.x527 - m.x540 == 13)
m.c165 = Constraint(expr= m.x450 + m.x515 - m.x528 - m.x541 == 23)
m.c166 = Constraint(expr= m.x451 + m.x516 - m.x529 - m.x542 == 23)
m.c167 = Constraint(expr= m.x452 + m.x517 - m.x530 - m.x543 == 14)
m.c168 = Constraint(expr= m.x453 + m.x518 - m.x531 - m.x544 == 8)
m.c169 = Constraint(expr= m.x454 + m.x519 - m.x532 - m.x545 == 25)
m.c170 = Constraint(expr= m.x455 + m.x520 - m.x533 - m.x546 == -157)
m.c171 = Constraint(expr= - m.x1 - m.x2 - m.x3 - m.x4 - m.x5 - m.x6 - m.x7 - m.x8 - m.x9 - m.x10 - m.x11 - m.x12 - m.x13
+ m.x632 >= 0)
m.c172 = Constraint(expr= - m.x14 - m.x15 - m.x16 - m.x17 - m.x18 - m.x19 - m.x20 - m.x21 - m.x22 - m.x23 - m.x24
- m.x25 - m.x26 + m.x633 >= 0)
m.c173 = Constraint(expr= - m.x27 - m.x28 - m.x29 - m.x30 - m.x31 - m.x32 - m.x33 - m.x34 - m.x35 - m.x36 - m.x37
- m.x38 - m.x39 + m.x634 >= 0)
m.c174 = Constraint(expr= - m.x40 - m.x41 - m.x42 - m.x43 - m.x44 - m.x45 - m.x46 - m.x47 - m.x48 - m.x49 - m.x50
- m.x51 - m.x52 + m.x635 >= 0)
m.c175 = Constraint(expr= - m.x53 - m.x54 - m.x55 - m.x56 - m.x57 - m.x58 - m.x59 - m.x60 - m.x61 - m.x62 - m.x63
- m.x64 - m.x65 + m.x636 >= 0)
m.c176 = Constraint(expr= - m.x66 - m.x67 - m.x68 - m.x69 - m.x70 - m.x71 - m.x72 - m.x73 - m.x74 - m.x75 - m.x76
- m.x77 - m.x78 + m.x637 >= 0)
m.c177 = Constraint(expr= - m.x79 - m.x80 - m.x81 - m.x82 - m.x83 - m.x84 - m.x85 - m.x86 - m.x87 - m.x88 - m.x89
- m.x90 - m.x91 + m.x638 >= 0)
m.c178 = Constraint(expr= - m.x92 - m.x93 - m.x94 - m.x95 - m.x96 - m.x97 - m.x98 - m.x99 - m.x100 - m.x101 - m.x102
- m.x103 - m.x104 + m.x639 >= 0)
m.c179 = Constraint(expr= - m.x105 - m.x106 - m.x107 - m.x108 - m.x109 - m.x110 - m.x111 - m.x112 - m.x113 - m.x114
- m.x115 - m.x116 - m.x117 + m.x640 >= 0)
m.c180 = Constraint(expr= - m.x118 - m.x119 - m.x120 - m.x121 - m.x122 - m.x123 - m.x124 - m.x125 - m.x126 - m.x127
- m.x128 - m.x129 - m.x130 + m.x641 >= 0)
m.c181 = Constraint(expr= - m.x131 - m.x132 - m.x133 - m.x134 - m.x135 - m.x136 - m.x137 - m.x138 - m.x139 - m.x140
- m.x141 - m.x142 - m.x143 + m.x642 >= 0)
m.c182 = Constraint(expr= - m.x144 - m.x145 - m.x146 - m.x147 - m.x148 - m.x149 - m.x150 - m.x151 - m.x152 - m.x153
- m.x154 - m.x155 - m.x156 + m.x643 >= 0)
m.c183 = Constraint(expr= - m.x157 - m.x158 - m.x159 - m.x160 - m.x161 - m.x162 - m.x163 - m.x164 - m.x165 - m.x166
- m.x167 - m.x168 - m.x169 + m.x644 >= 0)
m.c184 = Constraint(expr= - m.x170 - m.x171 - m.x172 - m.x173 - m.x174 - m.x175 - m.x176 - m.x177 - m.x178 - m.x179
- m.x180 - m.x181 - m.x182 + m.x645 >= 0)
m.c185 = Constraint(expr= - m.x183 - m.x184 - m.x185 - m.x186 - m.x187 - m.x188 - m.x189 - m.x190 - m.x191 - m.x192
- m.x193 - m.x194 - m.x195 + m.x646 >= 0)
m.c186 = Constraint(expr= - m.x196 - m.x197 - m.x198 - m.x199 - m.x200 - m.x201 - m.x202 - m.x203 - m.x204 - m.x205
- m.x206 - m.x207 - m.x208 + m.x647 >= 0)
m.c187 = Constraint(expr= - m.x209 - m.x210 - m.x211 - m.x212 - m.x213 - m.x214 - m.x215 - m.x216 - m.x217 - m.x218
- m.x219 - m.x220 - m.x221 + m.x648 >= 0)
m.c188 = Constraint(expr= - m.x222 - m.x223 - m.x224 - m.x225 - m.x226 - m.x227 - m.x228 - m.x229 - m.x230 - m.x231
- m.x232 - m.x233 - m.x234 + m.x649 >= 0)
m.c189 = Constraint(expr= - m.x235 - m.x236 - m.x237 - m.x238 - m.x239 - m.x240 - m.x241 - m.x242 - m.x243 - m.x244
- m.x245 - m.x246 - m.x247 + m.x650 >= 0)
m.c190 = Constraint(expr= - m.x248 - m.x249 - m.x250 - m.x251 - m.x252 - m.x253 - m.x254 - m.x255 - m.x256 - m.x257
- m.x258 - m.x259 - m.x260 + m.x651 >= 0)
m.c191 = Constraint(expr= - m.x261 - m.x262 - m.x263 - m.x264 - m.x265 - m.x266 - m.x267 - m.x268 - m.x269 - m.x270
- m.x271 - m.x272 - m.x273 + m.x652 >= 0)
m.c192 = Constraint(expr= - m.x274 - m.x275 - m.x276 - m.x277 - m.x278 - m.x279 - m.x280 - m.x281 - m.x282 - m.x283
- m.x284 - m.x285 - m.x286 + m.x653 >= 0)
m.c193 = Constraint(expr= - m.x287 - m.x288 - m.x289 - m.x290 - m.x291 - m.x292 - m.x293 - m.x294 - m.x295 - m.x296
- m.x297 - m.x298 - m.x299 + m.x654 >= 0)
m.c194 = Constraint(expr= - m.x300 - m.x301 - m.x302 - m.x303 - m.x304 - m.x305 - m.x306 - m.x307 - m.x308 - m.x309
- m.x310 - m.x311 - m.x312 + m.x655 >= 0)
m.c195 = Constraint(expr= - m.x313 - m.x314 - m.x315 - m.x316 - m.x317 - m.x318 - m.x319 - m.x320 - m.x321 - m.x322
- m.x323 - m.x324 - m.x325 + m.x656 >= 0)
m.c196 = Constraint(expr= - m.x326 - m.x327 - m.x328 - m.x329 - m.x330 - m.x331 - m.x332 - m.x333 - m.x334 - m.x335
- m.x336 - m.x337 - m.x338 + m.x657 >= 0)
m.c197 = Constraint(expr= - m.x339 - m.x340 - m.x341 - m.x342 - m.x343 - m.x344 - m.x345 - m.x346 - m.x347 - m.x348
- m.x349 - m.x350 - m.x351 + m.x658 >= 0)
m.c198 = Constraint(expr= - m.x352 - m.x353 - m.x354 - m.x355 - m.x356 - m.x357 - m.x358 - m.x359 - m.x360 - m.x361
- m.x362 - m.x363 - m.x364 + m.x659 >= 0)
m.c199 = Constraint(expr= - m.x365 - m.x366 - m.x367 - m.x368 - m.x369 - m.x370 - m.x371 - m.x372 - m.x373 - m.x374
- m.x375 - m.x376 - m.x377 + m.x660 >= 0)
m.c200 = Constraint(expr= - m.x378 - m.x379 - m.x380 - m.x381 - m.x382 - m.x383 - m.x384 - m.x385 - m.x386 - m.x387
- m.x388 - m.x389 - m.x390 + m.x661 >= 0)
m.c201 = Constraint(expr= - m.x391 - m.x392 - m.x393 - m.x394 - m.x395 - m.x396 - m.x397 - m.x398 - m.x399 - m.x400
- m.x401 - m.x402 - m.x403 + m.x662 >= 0)
m.c202 = Constraint(expr= - m.x404 - m.x405 - m.x406 - m.x407 - m.x408 - m.x409 - m.x410 - m.x411 - m.x412 - m.x413
- m.x414 - m.x415 - m.x416 + m.x663 >= 0)
m.c203 = Constraint(expr= - m.x417 - m.x418 - m.x419 - m.x420 - m.x421 - m.x422 - m.x423 - m.x424 - m.x425 - m.x426
- m.x427 - m.x428 - m.x429 + m.x664 >= 0)
m.c204 = Constraint(expr= - m.x430 - m.x431 - m.x432 - m.x433 - m.x434 - m.x435 - m.x436 - m.x437 - m.x438 - m.x439
- m.x440 - m.x441 - m.x442 + m.x665 >= 0)
m.c205 = Constraint(expr= - m.x443 - m.x444 - m.x445 - m.x446 - m.x447 - m.x448 - m.x449 - m.x450 - m.x451 - m.x452
- m.x453 - m.x454 - m.x455 + m.x666 >= 0)
m.c206 = Constraint(expr= - m.x456 - m.x457 - m.x458 - m.x459 - m.x460 - m.x461 - m.x462 - m.x463 - m.x464 - m.x465
- m.x466 - m.x467 - m.x468 + m.x667 >= 0)
m.c207 = Constraint(expr= - m.x469 - m.x470 - m.x471 - m.x472 - m.x473 - m.x474 - m.x475 - m.x476 - m.x477 - m.x478
- m.x479 - m.x480 - m.x481 + m.x668 >= 0)
m.c208 = Constraint(expr= - m.x482 - m.x483 - m.x484 - m.x485 - m.x486 - m.x487 - m.x488 - m.x489 - m.x490 - m.x491
- m.x492 - m.x493 - m.x494 + m.x669 >= 0)
m.c209 = Constraint(expr= - m.x495 - m.x496 - m.x497 - m.x498 - m.x499 - m.x500 - m.x501 - m.x502 - m.x503 - m.x504
- m.x505 - m.x506 - m.x507 + m.x670 >= 0)
m.c210 = Constraint(expr= - m.x508 - m.x509 - m.x510 - m.x511 - m.x512 - m.x513 - m.x514 - m.x515 - m.x516 - m.x517
- m.x518 - m.x519 - m.x520 + m.x671 >= 0)
m.c211 = Constraint(expr= - m.x521 - m.x522 - m.x523 - m.x524 - m.x525 - m.x526 - m.x527 - m.x528 - m.x529 - m.x530
- m.x531 - m.x532 - m.x533 + m.x672 >= 0)
m.c212 = Constraint(expr= - m.x534 - m.x535 - m.x536 - m.x537 - m.x538 - m.x539 - m.x540 - m.x541 - m.x542 - m.x543
- m.x544 - m.x545 - m.x546 + m.x673 >= 0)
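# The 42 rows c213..c254 below are the only nonlinear constraints in the model.
# Each carries three bilinear terms coupling one of x632..x673 (bounded below by
# the column sums in c171..c212), the corresponding binary b547..b588, and one of
# x589..x630; 42 rows x 3 terms = 126 nonlinear nonzeros, matching the GAMS
# Convert header.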
m.c213 = Constraint(expr=166*m.x632*m.b547 - 166*m.b547*m.x589 + m.x632*m.x589 <= 0)
m.c214 = Constraint(expr=463*m.x633*m.b548 - 463*m.b548*m.x590 + m.x633*m.x590 <= 0)
m.c215 = Constraint(expr=522*m.x634*m.b549 - 522*m.b549*m.x591 + m.x634*m.x591 <= 0)
m.c216 = Constraint(expr=141*m.x635*m.b550 - 141*m.b550*m.x592 + m.x635*m.x592 <= 0)
m.c217 = Constraint(expr=166*m.x636*m.b551 - 166*m.b551*m.x593 + m.x636*m.x593 <= 0)
m.c218 = Constraint(expr=265*m.x637*m.b552 - 265*m.b552*m.x594 + m.x637*m.x594 <= 0)
m.c219 = Constraint(expr=463*m.x638*m.b553 - 463*m.b553*m.x595 + m.x638*m.x595 <= 0)
m.c220 = Constraint(expr=456*m.x639*m.b554 - 456*m.b554*m.x596 + m.x639*m.x596 <= 0)
m.c221 = Constraint(expr=526*m.x640*m.b555 - 526*m.b555*m.x597 + m.x640*m.x597 <= 0)
m.c222 = Constraint(expr=152*m.x641*m.b556 - 152*m.b556*m.x598 + m.x641*m.x598 <= 0)
m.c223 = Constraint(expr=456*m.x642*m.b557 - 456*m.b557*m.x599 + m.x642*m.x599 <= 0)
m.c224 = Constraint(expr=384*m.x643*m.b558 - 384*m.b558*m.x600 + m.x643*m.x600 <= 0)
m.c225 = Constraint(expr=441*m.x644*m.b559 - 441*m.b559*m.x601 + m.x644*m.x601 <= 0)
m.c226 = Constraint(expr=309*m.x645*m.b560 - 309*m.b560*m.x602 + m.x645*m.x602 <= 0)
m.c227 = Constraint(expr=233*m.x646*m.b561 - 233*m.b561*m.x603 + m.x646*m.x603 <= 0)
m.c228 = Constraint(expr=526*m.x647*m.b562 - 526*m.b562*m.x604 + m.x647*m.x604 <= 0)
m.c229 = Constraint(expr=384*m.x648*m.b563 - 384*m.b563*m.x605 + m.x648*m.x605 <= 0)
m.c230 = Constraint(expr=203*m.x649*m.b564 - 203*m.b564*m.x606 + m.x649*m.x606 <= 0)
m.c231 = Constraint(expr=522*m.x650*m.b565 - 522*m.b565*m.x607 + m.x650*m.x607 <= 0)
m.c232 = Constraint(expr=265*m.x651*m.b566 - 265*m.b566*m.x608 + m.x651*m.x608 <= 0)
m.c233 = Constraint(expr=152*m.x652*m.b567 - 152*m.b567*m.x609 + m.x652*m.x609 <= 0)
m.c234 = Constraint(expr=441*m.x653*m.b568 - 441*m.b568*m.x610 + m.x653*m.x610 <= 0)
m.c235 = Constraint(expr=203*m.x654*m.b569 - 203*m.b569*m.x611 + m.x654*m.x611 <= 0)
m.c236 = Constraint(expr=284*m.x655*m.b570 - 284*m.b570*m.x612 + m.x655*m.x612 <= 0)
m.c237 = Constraint(expr=426*m.x656*m.b571 - 426*m.b571*m.x613 + m.x656*m.x613 <= 0)
m.c238 = Constraint(expr=284*m.x657*m.b572 - 284*m.b572*m.x614 + m.x657*m.x614 <= 0)
m.c239 = Constraint(expr=109*m.x658*m.b573 - 109*m.b573*m.x615 + m.x658*m.x615 <= 0)
m.c240 = Constraint(expr=309*m.x659*m.b574 - 309*m.b574*m.x616 + m.x659*m.x616 <= 0)
m.c241 = Constraint(expr=434*m.x660*m.b575 - 434*m.b575*m.x617 + m.x660*m.x617 <= 0)
m.c242 = Constraint(expr=141*m.x661*m.b576 - 141*m.b576*m.x618 + m.x661*m.x618 <= 0)
m.c243 = Constraint(expr=434*m.x662*m.b577 - 434*m.b577*m.x619 + m.x662*m.x619 <= 0)
m.c244 = Constraint(expr=403*m.x663*m.b578 - 403*m.b578*m.x620 + m.x663*m.x620 <= 0)
m.c245 = Constraint(expr=426*m.x664*m.b579 - 426*m.b579*m.x621 + m.x664*m.x621 <= 0)
m.c246 = Constraint(expr=403*m.x665*m.b580 - 403*m.b580*m.x622 + m.x665*m.x622 <= 0)
m.c247 = Constraint(expr=151*m.x666*m.b581 - 151*m.b581*m.x623 + m.x666*m.x623 <= 0)
m.c248 = Constraint(expr=233*m.x667*m.b582 - 233*m.b582*m.x624 + m.x667*m.x624 <= 0)
m.c249 = Constraint(expr=109*m.x668*m.b583 - 109*m.b583*m.x625 + m.x668*m.x625 <= 0)
m.c250 = Constraint(expr=367*m.x669*m.b584 - 367*m.b584*m.x626 + m.x669*m.x626 <= 0)
m.c251 = Constraint(expr=367*m.x670*m.b585 - 367*m.b585*m.x627 + m.x670*m.x627 <= 0)
m.c252 = Constraint(expr=382*m.x671*m.b586 - 382*m.b586*m.x628 + m.x671*m.x628 <= 0)
m.c253 = Constraint(expr=151*m.x672*m.b587 - 151*m.b587*m.x629 + m.x672*m.x629 <= 0)
m.c254 = Constraint(expr=382*m.x673*m.b588 - 382*m.b588*m.x630 + m.x673*m.x630 <= 0)
m.c255 = Constraint(expr= m.x589 + m.x590 + m.x591 + m.x592 + m.x593 + m.x594 + m.x595 + m.x596 + m.x597 + m.x598
+ m.x599 + m.x600 + m.x601 + m.x602 + m.x603 + m.x604 + m.x605 + m.x606 + m.x607 + m.x608
+ m.x609 + m.x610 + m.x611 + m.x612 + m.x613 + m.x614 + m.x615 + m.x616 + m.x617 + m.x618
+ m.x619 + m.x620 + m.x621 + m.x622 + m.x623 + m.x624 + m.x625 + m.x626 + m.x627 + m.x628
+ m.x629 + m.x630 <= 18536)
m.c256 = Constraint(expr= m.x1 + m.x2 + m.x3 + m.x4 + m.x5 + m.x6 + m.x7 + m.x8 + m.x9 + m.x10 + m.x11 + m.x12 + m.x13
- 166*m.b547 <= 0)
m.c257 = Constraint(expr= m.x14 + m.x15 + m.x16 + m.x17 + m.x18 + m.x19 + m.x20 + m.x21 + m.x22 + m.x23 + m.x24
+ m.x25 + m.x26 - 463*m.b548 <= 0)
m.c258 = Constraint(expr= m.x27 + m.x28 + m.x29 + m.x30 + m.x31 + m.x32 + m.x33 + m.x34 + m.x35 + m.x36 + m.x37
+ m.x38 + m.x39 - 522*m.b549 <= 0)
m.c259 = Constraint(expr= m.x40 + m.x41 + m.x42 + m.x43 + m.x44 + m.x45 + m.x46 + m.x47 + m.x48 + m.x49 + m.x50
+ m.x51 + m.x52 - 141*m.b550 <= 0)
m.c260 = Constraint(expr= m.x53 + m.x54 + m.x55 + m.x56 + m.x57 + m.x58 + m.x59 + m.x60 + m.x61 + m.x62 + m.x63
+ m.x64 + m.x65 - 166*m.b551 <= 0)
m.c261 = Constraint(expr= m.x66 + m.x67 + m.x68 + m.x69 + m.x70 + m.x71 + m.x72 + m.x73 + m.x74 + m.x75 + m.x76
+ m.x77 + m.x78 - 265*m.b552 <= 0)
m.c262 = Constraint(expr= m.x79 + m.x80 + m.x81 + m.x82 + m.x83 + m.x84 + m.x85 + m.x86 + m.x87 + m.x88 + m.x89
+ m.x90 + m.x91 - 463*m.b553 <= 0)
m.c263 = Constraint(expr= m.x92 + m.x93 + m.x94 + m.x95 + m.x96 + m.x97 + m.x98 + m.x99 + m.x100 + m.x101 + m.x102
+ m.x103 + m.x104 - 456*m.b554 <= 0)
m.c264 = Constraint(expr= m.x105 + m.x106 + m.x107 + m.x108 + m.x109 + m.x110 + m.x111 + m.x112 + m.x113 + m.x114
+ m.x115 + m.x116 + m.x117 - 526*m.b555 <= 0)
m.c265 = Constraint(expr= m.x118 + m.x119 + m.x120 + m.x121 + m.x122 + m.x123 + m.x124 + m.x125 + m.x126 + m.x127
+ m.x128 + m.x129 + m.x130 - 152*m.b556 <= 0)
m.c266 = Constraint(expr= m.x131 + m.x132 + m.x133 + m.x134 + m.x135 + m.x136 + m.x137 + m.x138 + m.x139 + m.x140
+ m.x141 + m.x142 + m.x143 - 456*m.b557 <= 0)
m.c267 = Constraint(expr= m.x144 + m.x145 + m.x146 + m.x147 + m.x148 + m.x149 + m.x150 + m.x151 + m.x152 + m.x153
+ m.x154 + m.x155 + m.x156 - 384*m.b558 <= 0)
m.c268 = Constraint(expr= m.x157 + m.x158 + m.x159 + m.x160 + m.x161 + m.x162 + m.x163 + m.x164 + m.x165 + m.x166
+ m.x167 + m.x168 + m.x169 - 441*m.b559 <= 0)
m.c269 = Constraint(expr= m.x170 + m.x171 + m.x172 + m.x173 + m.x174 + m.x175 + m.x176 + m.x177 + m.x178 + m.x179
+ m.x180 + m.x181 + m.x182 - 309*m.b560 <= 0)
m.c270 = Constraint(expr= m.x183 + m.x184 + m.x185 + m.x186 + m.x187 + m.x188 + m.x189 + m.x190 + m.x191 + m.x192
+ m.x193 + m.x194 + m.x195 - 233*m.b561 <= 0)
m.c271 = Constraint(expr= m.x196 + m.x197 + m.x198 + m.x199 + m.x200 + m.x201 + m.x202 + m.x203 + m.x204 + m.x205
+ m.x206 + m.x207 + m.x208 - 526*m.b562 <= 0)
m.c272 = Constraint(expr= m.x209 + m.x210 + m.x211 + m.x212 + m.x213 + m.x214 + m.x215 + m.x216 + m.x217 + m.x218
+ m.x219 + m.x220 + m.x221 - 384*m.b563 <= 0)
m.c273 = Constraint(expr= m.x222 + m.x223 + m.x224 + m.x225 + m.x226 + m.x227 + m.x228 + m.x229 + m.x230 + m.x231
+ m.x232 + m.x233 + m.x234 - 203*m.b564 <= 0)
m.c274 = Constraint(expr= m.x235 + m.x236 + m.x237 + m.x238 + m.x239 + m.x240 + m.x241 + m.x242 + m.x243 + m.x244
+ m.x245 + m.x246 + m.x247 - 522*m.b565 <= 0)
m.c275 = Constraint(expr= m.x248 + m.x249 + m.x250 + m.x251 + m.x252 + m.x253 + m.x254 + m.x255 + m.x256 + m.x257
+ m.x258 + m.x259 + m.x260 - 265*m.b566 <= 0)
m.c276 = Constraint(expr= m.x261 + m.x262 + m.x263 + m.x264 + m.x265 + m.x266 + m.x267 + m.x268 + m.x269 + m.x270
+ m.x271 + m.x272 + m.x273 - 152*m.b567 <= 0)
m.c277 = Constraint(expr= m.x274 + m.x275 + m.x276 + m.x277 + m.x278 + m.x279 + m.x280 + m.x281 + m.x282 + m.x283
+ m.x284 + m.x285 + m.x286 - 441*m.b568 <= 0)
m.c278 = Constraint(expr= m.x287 + m.x288 + m.x289 + m.x290 + m.x291 + m.x292 + m.x293 + m.x294 + m.x295 + m.x296
+ m.x297 + m.x298 + m.x299 - 203*m.b569 <= 0)
m.c279 = Constraint(expr= m.x300 + m.x301 + m.x302 + m.x303 + m.x304 + m.x305 + m.x306 + m.x307 + m.x308 + m.x309
+ m.x310 + m.x311 + m.x312 - 284*m.b570 <= 0)
m.c280 = Constraint(expr= m.x313 + m.x314 + m.x315 + m.x316 + m.x317 + m.x318 + m.x319 + m.x320 + m.x321 + m.x322
+ m.x323 + m.x324 + m.x325 - 426*m.b571 <= 0)
m.c281 = Constraint(expr= m.x326 + m.x327 + m.x328 + m.x329 + m.x330 + m.x331 + m.x332 + m.x333 + m.x334 + m.x335
+ m.x336 + m.x337 + m.x338 - 284*m.b572 <= 0)
m.c282 = Constraint(expr= m.x339 + m.x340 + m.x341 + m.x342 + m.x343 + m.x344 + m.x345 + m.x346 + m.x347 + m.x348
+ m.x349 + m.x350 + m.x351 - 109*m.b573 <= 0)
m.c283 = Constraint(expr= m.x352 + m.x353 + m.x354 + m.x355 + m.x356 + m.x357 + m.x358 + m.x359 + m.x360 + m.x361
+ m.x362 + m.x363 + m.x364 - 309*m.b574 <= 0)
m.c284 = Constraint(expr= m.x365 + m.x366 + m.x367 + m.x368 + m.x369 + m.x370 + m.x371 + m.x372 + m.x373 + m.x374
+ m.x375 + m.x376 + m.x377 - 434*m.b575 <= 0)
m.c285 = Constraint(expr= m.x378 + m.x379 + m.x380 + m.x381 + m.x382 + m.x383 + m.x384 + m.x385 + m.x386 + m.x387
+ m.x388 + m.x389 + m.x390 - 141*m.b576 <= 0)
m.c286 = Constraint(expr= m.x391 + m.x392 + m.x393 + m.x394 + m.x395 + m.x396 + m.x397 + m.x398 + m.x399 + m.x400
+ m.x401 + m.x402 + m.x403 - 434*m.b577 <= 0)
m.c287 = Constraint(expr= m.x404 + m.x405 + m.x406 + m.x407 + m.x408 + m.x409 + m.x410 + m.x411 + m.x412 + m.x413
+ m.x414 + m.x415 + m.x416 - 403*m.b578 <= 0)
m.c288 = Constraint(expr= m.x417 + m.x418 + m.x419 + m.x420 + m.x421 + m.x422 + m.x423 + m.x424 + m.x425 + m.x426
+ m.x427 + m.x428 + m.x429 - 426*m.b579 <= 0)
m.c289 = Constraint(expr= m.x430 + m.x431 + m.x432 + m.x433 + m.x434 + m.x435 + m.x436 + m.x437 + m.x438 + m.x439
+ m.x440 + m.x441 + m.x442 - 403*m.b580 <= 0)
m.c290 = Constraint(expr= m.x443 + m.x444 + m.x445 + m.x446 + m.x447 + m.x448 + m.x449 + m.x450 + m.x451 + m.x452
+ m.x453 + m.x454 + m.x455 - 151*m.b581 <= 0)
m.c291 = Constraint(expr= m.x456 + m.x457 + m.x458 + m.x459 + m.x460 + m.x461 + m.x462 + m.x463 + m.x464 + m.x465
+ m.x466 + m.x467 + m.x468 - 233*m.b582 <= 0)
m.c292 = Constraint(expr= m.x469 + m.x470 + m.x471 + m.x472 + m.x473 + m.x474 + m.x475 + m.x476 + m.x477 + m.x478
+ m.x479 + m.x480 + m.x481 - 109*m.b583 <= 0)
m.c293 = Constraint(expr= m.x482 + m.x483 + m.x484 + m.x485 + m.x486 + m.x487 + m.x488 + m.x489 + m.x490 + m.x491
+ m.x492 + m.x493 + m.x494 - 367*m.b584 <= 0)
m.c294 = Constraint(expr= m.x495 + m.x496 + m.x497 + m.x498 + m.x499 + m.x500 + m.x501 + m.x502 + m.x503 + m.x504
+ m.x505 + m.x506 + m.x507 - 367*m.b585 <= 0)
m.c295 = Constraint(expr= m.x508 + m.x509 + m.x510 + m.x511 + m.x512 + m.x513 + m.x514 + m.x515 + m.x516 + m.x517
+ m.x518 + m.x519 + m.x520 - 382*m.b586 <= 0)
m.c296 = Constraint(expr= m.x521 + m.x522 + m.x523 + m.x524 + m.x525 + m.x526 + m.x527 + m.x528 + m.x529 + m.x530
+ m.x531 + m.x532 + m.x533 - 151*m.b587 <= 0)
m.c297 = Constraint(expr= m.x534 + m.x535 + m.x536 + m.x537 + m.x538 + m.x539 + m.x540 + m.x541 + m.x542 + m.x543
+ m.x544 + m.x545 + m.x546 - 382*m.b588 <= 0)
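# The model definition is complete at this point (objective plus c2..c297).
# Because of the bilinear rows c213..c254 it is a nonconvex MINLP, so a solver
# able to handle nonconvex MINLPs is required.  A minimal solve sketch follows;
# it assumes Couenne is installed and visible to Pyomo, and any other MINLP
# solver name accepted by SolverFactory can be substituted.
from pyomo.environ import SolverFactory, value

opt = SolverFactory('couenne')      # assumed solver choice; swap for one installed locally
results = opt.solve(m, tee=True)    # 'm' is the ConcreteModel built above
print(results.solver.termination_condition)
print('objective =', value(m.obj))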
| # MINLP written by GAMS Convert at 08/20/20 01:30:45
#
# Equation counts
# Total E G L N X C B
# 297 170 42 85 0 0 0 0
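#     (key: E = equality, G = ">=", L = "<=", N = nonbinding, X = external,
#      C = conic, B = logic rows, following the usual GAMS equation types)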
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 673 631 42 0 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 2479 2353 126 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()
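# The index space 1..673 is shared across variable types: indices 547-588 are
# the binary variables b547..b588, and the remaining 631 indices are the
# nonnegative continuous variables x1..x546 and x589..x673, matching the
# variable counts in the header above.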
m.x1 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x3 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x5 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x6 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x7 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x8 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x9 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x10 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x11 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x12 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x13 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x14 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x15 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x16 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x17 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x18 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x19 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x20 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x21 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x22 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x23 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x24 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x25 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x26 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x27 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x28 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x29 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x30 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x31 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x32 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x33 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x34 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x35 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x36 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x37 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x38 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x39 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x40 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x41 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x42 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x43 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x44 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x45 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x46 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x47 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x48 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x49 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x50 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x51 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x52 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x53 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x54 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x55 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x56 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x57 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x58 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x59 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x60 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x61 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x62 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x63 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x64 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x65 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x66 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x67 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x68 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x69 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x70 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x71 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x72 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x73 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x74 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x75 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x76 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x77 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x78 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x79 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x80 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x81 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x82 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x83 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x84 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x85 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x86 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x87 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x88 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x89 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x90 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x91 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x92 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x93 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x94 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x95 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x96 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x97 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x98 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x99 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x100 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x101 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x102 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x103 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x104 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x105 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x106 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x107 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x108 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x109 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x110 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x111 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x112 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x113 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x114 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x115 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x116 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x117 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x118 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x119 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x120 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x121 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x122 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x123 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x124 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x125 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x126 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x127 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x128 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x129 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x130 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x131 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x132 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x133 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x134 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x135 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x136 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x137 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x138 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x139 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x140 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x141 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x142 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x143 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x144 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x145 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x146 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x147 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x148 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x149 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x150 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x151 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x152 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x153 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x154 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x155 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x156 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x157 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x158 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x159 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x160 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x161 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x162 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x163 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x164 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x165 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x166 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x167 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x168 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x169 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x170 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x171 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x172 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x173 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x174 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x175 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x176 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x177 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x178 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x179 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x180 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x181 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x182 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x183 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x184 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x185 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x186 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x187 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x188 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x189 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x190 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x191 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x192 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x193 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x194 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x195 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x196 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x197 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x198 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x199 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x200 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x201 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x202 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x203 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x204 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x205 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x206 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x207 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x208 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x209 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x210 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x211 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x212 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x213 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x214 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x215 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x216 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x217 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x218 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x219 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x220 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x221 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x222 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x223 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x224 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x225 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x226 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x227 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x228 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x229 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x230 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x231 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x232 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x233 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x234 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x235 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x236 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x237 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x238 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x239 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x240 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x241 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x242 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x243 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x244 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x245 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x246 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x247 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x248 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x249 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x250 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x251 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x252 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x253 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x254 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x255 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x256 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x257 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x258 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x259 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x260 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x261 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x262 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x263 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x264 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x265 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x266 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x267 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x268 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x269 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x270 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x271 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x272 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x273 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x274 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x275 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x276 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x277 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x278 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x279 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x280 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x281 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x282 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x283 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x284 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x285 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x286 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x287 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x288 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x289 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x290 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x291 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x292 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x293 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x294 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x295 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x296 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x297 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x298 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x299 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x300 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x301 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x302 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x303 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x304 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x305 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x306 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x307 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x308 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x309 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x310 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x311 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x312 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x313 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x314 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x315 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x316 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x317 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x318 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x319 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x320 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x321 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x322 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x323 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x324 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x325 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x326 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x327 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x328 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x329 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x330 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x331 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x332 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x333 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x334 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x335 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x336 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x337 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x338 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x339 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x340 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x341 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x342 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x343 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x344 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x345 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x346 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x347 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x348 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x349 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x350 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x351 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x352 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x353 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x354 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x355 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x356 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x357 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x358 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x359 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x360 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x361 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x362 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x363 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x364 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x365 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x366 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x367 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x368 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x369 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x370 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x371 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x372 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x373 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x374 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x375 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x376 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x377 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x378 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x379 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x380 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x381 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x382 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x383 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x384 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x385 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x386 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x387 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x388 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x389 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x390 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x391 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x392 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x393 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x394 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x395 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x396 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x397 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x398 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x399 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x400 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x401 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x402 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x403 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x404 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x405 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x406 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x407 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x408 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x409 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x410 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x411 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x412 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x413 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x414 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x415 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x416 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x417 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x418 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x419 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x420 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x421 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x422 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x423 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x424 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x425 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x426 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x427 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x428 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x429 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x430 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x431 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x432 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x433 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x434 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x435 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x436 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x437 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x438 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x439 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x440 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x441 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x442 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x443 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x444 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x445 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x446 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x447 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x448 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x449 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x450 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x451 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x452 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x453 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x454 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x455 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x456 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x457 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x458 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x459 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x460 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x461 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x462 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x463 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x464 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x465 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x466 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x467 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x468 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x469 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x470 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x471 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x472 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x473 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x474 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x475 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x476 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x477 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x478 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x479 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x480 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x481 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x482 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x483 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x484 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x485 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x486 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x487 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x488 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x489 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x490 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x491 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x492 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x493 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x494 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x495 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x496 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x497 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x498 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x499 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x500 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x501 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x502 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x503 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x504 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x505 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x506 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x507 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x508 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x509 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x510 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x511 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x512 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x513 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x514 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x515 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x516 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x517 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x518 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x519 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x520 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x521 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x522 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x523 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x524 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x525 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x526 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x527 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x528 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x529 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x530 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x531 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x532 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x533 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x534 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x535 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x536 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x537 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x538 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x539 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x540 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x541 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x542 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x543 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x544 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x545 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x546 = Var(within=Reals,bounds=(0,None),initialize=0)
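# b547 - b588: binary selection variables, one per group of 13 continuous variables (linked in constraints c256 - c297 below)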
m.b547 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b548 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b549 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b550 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b551 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b552 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b553 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b554 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b555 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b556 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b557 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b558 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b559 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b560 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b561 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b562 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b563 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b564 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b565 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b566 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b567 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b568 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b569 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b570 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b571 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b572 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b573 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b574 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b575 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b576 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b577 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b578 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b579 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b580 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b581 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b582 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b583 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b584 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b585 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b586 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b587 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b588 = Var(within=Binary,bounds=(0,1),initialize=0)
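# x589 - x630: auxiliary nonnegative variables used in the bilinear constraints c213 - c254 and the budget c255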
m.x589 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x590 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x591 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x592 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x593 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x594 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x595 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x596 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x597 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x598 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x599 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x600 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x601 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x602 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x603 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x604 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x605 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x606 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x607 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x608 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x609 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x610 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x611 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x612 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x613 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x614 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x615 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x616 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x617 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x618 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x619 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x620 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x621 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x622 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x623 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x624 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x625 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x626 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x627 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x628 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x629 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x630 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x632 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x633 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x634 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x635 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x636 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x637 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x638 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x639 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x640 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x641 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x642 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x643 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x644 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x645 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x646 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x647 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x648 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x649 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x650 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x651 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x652 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x653 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x654 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x655 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x656 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x657 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x658 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x659 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x660 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x661 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x662 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x663 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x664 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x665 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x666 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x667 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x668 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x669 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x670 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x671 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x672 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x673 = Var(within=Reals,bounds=(0,None),initialize=0)
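# Objective: minimize a weighted sum of the binary selection variables b547 - b588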
m.obj = Objective(expr= 1.090016011*m.b547 + 3.10674202*m.b548 + 2.475702586*m.b549 + 1.966733944*m.b550
+ 1.090016011*m.b551 + 2.019536713*m.b552 + 3.10674202*m.b553 + 1.383540955*m.b554
+ 2.087059045*m.b555 + 3.720443668*m.b556 + 1.383540955*m.b557 + 1.794144217*m.b558
+ 3.50653318*m.b559 + 1.71812596*m.b560 + 3.834780538*m.b561 + 2.087059045*m.b562
+ 1.794144217*m.b563 + 2.239621249*m.b564 + 2.475702586*m.b565 + 2.019536713*m.b566
+ 3.720443668*m.b567 + 3.50653318*m.b568 + 2.239621249*m.b569 + 1.098732406*m.b570
+ 1.742557876*m.b571 + 1.098732406*m.b572 + 3.606882982*m.b573 + 1.71812596*m.b574
+ 2.074958698*m.b575 + 1.966733944*m.b576 + 2.074958698*m.b577 + 3.859970515*m.b578
+ 1.742557876*m.b579 + 3.859970515*m.b580 + 3.951460459*m.b581 + 3.834780538*m.b582
+ 3.606882982*m.b583 + 2.524064089*m.b584 + 2.524064089*m.b585 + 3.982701487*m.b586
+ 3.951460459*m.b587 + 3.982701487*m.b588, sense=minimize)
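# c2 - c170: linear equality constraints; the +/- pattern suggests node flow-balance equations with fixed net supplies and demands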
m.c2 = Constraint(expr= - m.x1 - m.x14 - m.x27 - m.x40 + m.x53 + m.x79 + m.x235 + m.x378 == -148)
m.c3 = Constraint(expr= - m.x2 - m.x15 - m.x28 - m.x41 + m.x54 + m.x80 + m.x236 + m.x379 == 12)
m.c4 = Constraint(expr= - m.x3 - m.x16 - m.x29 - m.x42 + m.x55 + m.x81 + m.x237 + m.x380 == 16)
m.c5 = Constraint(expr= - m.x4 - m.x17 - m.x30 - m.x43 + m.x56 + m.x82 + m.x238 + m.x381 == 21)
m.c6 = Constraint(expr= - m.x5 - m.x18 - m.x31 - m.x44 + m.x57 + m.x83 + m.x239 + m.x382 == 11)
m.c7 = Constraint(expr= - m.x6 - m.x19 - m.x32 - m.x45 + m.x58 + m.x84 + m.x240 + m.x383 == 24)
m.c8 = Constraint(expr= - m.x7 - m.x20 - m.x33 - m.x46 + m.x59 + m.x85 + m.x241 + m.x384 == 24)
m.c9 = Constraint(expr= - m.x8 - m.x21 - m.x34 - m.x47 + m.x60 + m.x86 + m.x242 + m.x385 == 8)
m.c10 = Constraint(expr= - m.x9 - m.x22 - m.x35 - m.x48 + m.x61 + m.x87 + m.x243 + m.x386 == 10)
m.c11 = Constraint(expr= - m.x10 - m.x23 - m.x36 - m.x49 + m.x62 + m.x88 + m.x244 + m.x387 == 18)
m.c12 = Constraint(expr= - m.x11 - m.x24 - m.x37 - m.x50 + m.x63 + m.x89 + m.x245 + m.x388 == 11)
m.c13 = Constraint(expr= - m.x12 - m.x25 - m.x38 - m.x51 + m.x64 + m.x90 + m.x246 + m.x389 == 20)
m.c14 = Constraint(expr= - m.x13 - m.x26 - m.x39 - m.x52 + m.x65 + m.x91 + m.x247 + m.x390 == 7)
m.c15 = Constraint(expr= m.x1 - m.x53 - m.x66 + m.x248 == 7)
m.c16 = Constraint(expr= m.x2 - m.x54 - m.x67 + m.x249 == -175)
m.c17 = Constraint(expr= m.x3 - m.x55 - m.x68 + m.x250 == 15)
m.c18 = Constraint(expr= m.x4 - m.x56 - m.x69 + m.x251 == 17)
m.c19 = Constraint(expr= m.x5 - m.x57 - m.x70 + m.x252 == 20)
m.c20 = Constraint(expr= m.x6 - m.x58 - m.x71 + m.x253 == 24)
m.c21 = Constraint(expr= m.x7 - m.x59 - m.x72 + m.x254 == 6)
m.c22 = Constraint(expr= m.x8 - m.x60 - m.x73 + m.x255 == 19)
m.c23 = Constraint(expr= m.x9 - m.x61 - m.x74 + m.x256 == 24)
m.c24 = Constraint(expr= m.x10 - m.x62 - m.x75 + m.x257 == 11)
m.c25 = Constraint(expr= m.x11 - m.x63 - m.x76 + m.x258 == 15)
m.c26 = Constraint(expr= m.x12 - m.x64 - m.x77 + m.x259 == 9)
m.c27 = Constraint(expr= m.x13 - m.x65 - m.x78 + m.x260 == 19)
m.c28 = Constraint(expr= m.x14 - m.x79 - m.x92 - m.x105 - m.x118 + m.x131 + m.x196 + m.x261 == 15)
m.c29 = Constraint(expr= m.x15 - m.x80 - m.x93 - m.x106 - m.x119 + m.x132 + m.x197 + m.x262 == 13)
m.c30 = Constraint(expr= m.x16 - m.x81 - m.x94 - m.x107 - m.x120 + m.x133 + m.x198 + m.x263 == -231)
m.c31 = Constraint(expr= m.x17 - m.x82 - m.x95 - m.x108 - m.x121 + m.x134 + m.x199 + m.x264 == 23)
m.c32 = Constraint(expr= m.x18 - m.x83 - m.x96 - m.x109 - m.x122 + m.x135 + m.x200 + m.x265 == 18)
m.c33 = Constraint(expr= m.x19 - m.x84 - m.x97 - m.x110 - m.x123 + m.x136 + m.x201 + m.x266 == 19)
m.c34 = Constraint(expr= m.x20 - m.x85 - m.x98 - m.x111 - m.x124 + m.x137 + m.x202 + m.x267 == 9)
m.c35 = Constraint(expr= m.x21 - m.x86 - m.x99 - m.x112 - m.x125 + m.x138 + m.x203 + m.x268 == 8)
m.c36 = Constraint(expr= m.x22 - m.x87 - m.x100 - m.x113 - m.x126 + m.x139 + m.x204 + m.x269 == 16)
m.c37 = Constraint(expr= m.x23 - m.x88 - m.x101 - m.x114 - m.x127 + m.x140 + m.x205 + m.x270 == 19)
m.c38 = Constraint(expr= m.x24 - m.x89 - m.x102 - m.x115 - m.x128 + m.x141 + m.x206 + m.x271 == 19)
m.c39 = Constraint(expr= m.x25 - m.x90 - m.x103 - m.x116 - m.x129 + m.x142 + m.x207 + m.x272 == 21)
m.c40 = Constraint(expr= m.x26 - m.x91 - m.x104 - m.x117 - m.x130 + m.x143 + m.x208 + m.x273 == 8)
m.c41 = Constraint(expr= m.x92 - m.x131 - m.x144 - m.x157 - m.x170 - m.x183 + m.x209 + m.x274 + m.x352 + m.x456 == 12)
m.c42 = Constraint(expr= m.x93 - m.x132 - m.x145 - m.x158 - m.x171 - m.x184 + m.x210 + m.x275 + m.x353 + m.x457 == 20)
m.c43 = Constraint(expr= m.x94 - m.x133 - m.x146 - m.x159 - m.x172 - m.x185 + m.x211 + m.x276 + m.x354 + m.x458 == 23)
m.c44 = Constraint(expr= m.x95 - m.x134 - m.x147 - m.x160 - m.x173 - m.x186 + m.x212 + m.x277 + m.x355 + m.x459
== -187)
m.c45 = Constraint(expr= m.x96 - m.x135 - m.x148 - m.x161 - m.x174 - m.x187 + m.x213 + m.x278 + m.x356 + m.x460 == 21)
m.c46 = Constraint(expr= m.x97 - m.x136 - m.x149 - m.x162 - m.x175 - m.x188 + m.x214 + m.x279 + m.x357 + m.x461 == 12)
m.c47 = Constraint(expr= m.x98 - m.x137 - m.x150 - m.x163 - m.x176 - m.x189 + m.x215 + m.x280 + m.x358 + m.x462 == 6)
m.c48 = Constraint(expr= m.x99 - m.x138 - m.x151 - m.x164 - m.x177 - m.x190 + m.x216 + m.x281 + m.x359 + m.x463 == 11)
m.c49 = Constraint(expr= m.x100 - m.x139 - m.x152 - m.x165 - m.x178 - m.x191 + m.x217 + m.x282 + m.x360 + m.x464
== 19)
m.c50 = Constraint(expr= m.x101 - m.x140 - m.x153 - m.x166 - m.x179 - m.x192 + m.x218 + m.x283 + m.x361 + m.x465 == 9)
m.c51 = Constraint(expr= m.x102 - m.x141 - m.x154 - m.x167 - m.x180 - m.x193 + m.x219 + m.x284 + m.x362 + m.x466
== 17)
m.c52 = Constraint(expr= m.x103 - m.x142 - m.x155 - m.x168 - m.x181 - m.x194 + m.x220 + m.x285 + m.x363 + m.x467
== 23)
m.c53 = Constraint(expr= m.x104 - m.x143 - m.x156 - m.x169 - m.x182 - m.x195 + m.x221 + m.x286 + m.x364 + m.x468
== 21)
m.c54 = Constraint(expr= m.x105 + m.x144 - m.x196 - m.x209 - m.x222 + m.x287 == 14)
m.c55 = Constraint(expr= m.x106 + m.x145 - m.x197 - m.x210 - m.x223 + m.x288 == 7)
m.c56 = Constraint(expr= m.x107 + m.x146 - m.x198 - m.x211 - m.x224 + m.x289 == 22)
m.c57 = Constraint(expr= m.x108 + m.x147 - m.x199 - m.x212 - m.x225 + m.x290 == 14)
m.c58 = Constraint(expr= m.x109 + m.x148 - m.x200 - m.x213 - m.x226 + m.x291 == -170)
m.c59 = Constraint(expr= m.x110 + m.x149 - m.x201 - m.x214 - m.x227 + m.x292 == 12)
m.c60 = Constraint(expr= m.x111 + m.x150 - m.x202 - m.x215 - m.x228 + m.x293 == 13)
m.c61 = Constraint(expr= m.x112 + m.x151 - m.x203 - m.x216 - m.x229 + m.x294 == 10)
m.c62 = Constraint(expr= m.x113 + m.x152 - m.x204 - m.x217 - m.x230 + m.x295 == 15)
m.c63 = Constraint(expr= m.x114 + m.x153 - m.x205 - m.x218 - m.x231 + m.x296 == 9)
m.c64 = Constraint(expr= m.x115 + m.x154 - m.x206 - m.x219 - m.x232 + m.x297 == 14)
m.c65 = Constraint(expr= m.x116 + m.x155 - m.x207 - m.x220 - m.x233 + m.x298 == 16)
m.c66 = Constraint(expr= m.x117 + m.x156 - m.x208 - m.x221 - m.x234 + m.x299 == 8)
m.c67 = Constraint(expr= m.x27 + m.x66 + m.x118 + m.x157 + m.x222 - m.x235 - m.x248 - m.x261 - m.x274 - m.x287
- m.x300 - m.x313 + m.x326 + m.x417 == 13)
m.c68 = Constraint(expr= m.x28 + m.x67 + m.x119 + m.x158 + m.x223 - m.x236 - m.x249 - m.x262 - m.x275 - m.x288
- m.x301 - m.x314 + m.x327 + m.x418 == 22)
m.c69 = Constraint(expr= m.x29 + m.x68 + m.x120 + m.x159 + m.x224 - m.x237 - m.x250 - m.x263 - m.x276 - m.x289
- m.x302 - m.x315 + m.x328 + m.x419 == 23)
m.c70 = Constraint(expr= m.x30 + m.x69 + m.x121 + m.x160 + m.x225 - m.x238 - m.x251 - m.x264 - m.x277 - m.x290
- m.x303 - m.x316 + m.x329 + m.x420 == 7)
m.c71 = Constraint(expr= m.x31 + m.x70 + m.x122 + m.x161 + m.x226 - m.x239 - m.x252 - m.x265 - m.x278 - m.x291
- m.x304 - m.x317 + m.x330 + m.x421 == 16)
m.c72 = Constraint(expr= m.x32 + m.x71 + m.x123 + m.x162 + m.x227 - m.x240 - m.x253 - m.x266 - m.x279 - m.x292
- m.x305 - m.x318 + m.x331 + m.x422 == -169)
m.c73 = Constraint(expr= m.x33 + m.x72 + m.x124 + m.x163 + m.x228 - m.x241 - m.x254 - m.x267 - m.x280 - m.x293
- m.x306 - m.x319 + m.x332 + m.x423 == 20)
m.c74 = Constraint(expr= m.x34 + m.x73 + m.x125 + m.x164 + m.x229 - m.x242 - m.x255 - m.x268 - m.x281 - m.x294
- m.x307 - m.x320 + m.x333 + m.x424 == 14)
m.c75 = Constraint(expr= m.x35 + m.x74 + m.x126 + m.x165 + m.x230 - m.x243 - m.x256 - m.x269 - m.x282 - m.x295
- m.x308 - m.x321 + m.x334 + m.x425 == 11)
m.c76 = Constraint(expr= m.x36 + m.x75 + m.x127 + m.x166 + m.x231 - m.x244 - m.x257 - m.x270 - m.x283 - m.x296
- m.x309 - m.x322 + m.x335 + m.x426 == 13)
m.c77 = Constraint(expr= m.x37 + m.x76 + m.x128 + m.x167 + m.x232 - m.x245 - m.x258 - m.x271 - m.x284 - m.x297
- m.x310 - m.x323 + m.x336 + m.x427 == 10)
m.c78 = Constraint(expr= m.x38 + m.x77 + m.x129 + m.x168 + m.x233 - m.x246 - m.x259 - m.x272 - m.x285 - m.x298
- m.x311 - m.x324 + m.x337 + m.x428 == 13)
m.c79 = Constraint(expr= m.x39 + m.x78 + m.x130 + m.x169 + m.x234 - m.x247 - m.x260 - m.x273 - m.x286 - m.x299
- m.x312 - m.x325 + m.x338 + m.x429 == 12)
m.c80 = Constraint(expr= m.x300 - m.x326 - m.x339 + m.x469 == 6)
m.c81 = Constraint(expr= m.x301 - m.x327 - m.x340 + m.x470 == 16)
m.c82 = Constraint(expr= m.x302 - m.x328 - m.x341 + m.x471 == 22)
m.c83 = Constraint(expr= m.x303 - m.x329 - m.x342 + m.x472 == 9)
m.c84 = Constraint(expr= m.x304 - m.x330 - m.x343 + m.x473 == 13)
m.c85 = Constraint(expr= m.x305 - m.x331 - m.x344 + m.x474 == 7)
m.c86 = Constraint(expr= m.x306 - m.x332 - m.x345 + m.x475 == -156)
m.c87 = Constraint(expr= m.x307 - m.x333 - m.x346 + m.x476 == 20)
m.c88 = Constraint(expr= m.x308 - m.x334 - m.x347 + m.x477 == 19)
m.c89 = Constraint(expr= m.x309 - m.x335 - m.x348 + m.x478 == 24)
m.c90 = Constraint(expr= m.x310 - m.x336 - m.x349 + m.x479 == 8)
m.c91 = Constraint(expr= m.x311 - m.x337 - m.x350 + m.x480 == 21)
m.c92 = Constraint(expr= m.x312 - m.x338 - m.x351 + m.x481 == 6)
m.c93 = Constraint(expr= m.x170 - m.x352 - m.x365 + m.x391 == 15)
m.c94 = Constraint(expr= m.x171 - m.x353 - m.x366 + m.x392 == 15)
m.c95 = Constraint(expr= m.x172 - m.x354 - m.x367 + m.x393 == 23)
m.c96 = Constraint(expr= m.x173 - m.x355 - m.x368 + m.x394 == 25)
m.c97 = Constraint(expr= m.x174 - m.x356 - m.x369 + m.x395 == 20)
m.c98 = Constraint(expr= m.x175 - m.x357 - m.x370 + m.x396 == 7)
m.c99 = Constraint(expr= m.x176 - m.x358 - m.x371 + m.x397 == 19)
m.c100 = Constraint(expr= m.x177 - m.x359 - m.x372 + m.x398 == -177)
m.c101 = Constraint(expr= m.x178 - m.x360 - m.x373 + m.x399 == 7)
m.c102 = Constraint(expr= m.x179 - m.x361 - m.x374 + m.x400 == 18)
m.c103 = Constraint(expr= m.x180 - m.x362 - m.x375 + m.x401 == 25)
m.c104 = Constraint(expr= m.x181 - m.x363 - m.x376 + m.x402 == 20)
m.c105 = Constraint(expr= m.x182 - m.x364 - m.x377 + m.x403 == 18)
m.c106 = Constraint(expr= m.x40 + m.x365 - m.x378 - m.x391 - m.x404 + m.x430 == 8)
m.c107 = Constraint(expr= m.x41 + m.x366 - m.x379 - m.x392 - m.x405 + m.x431 == 11)
m.c108 = Constraint(expr= m.x42 + m.x367 - m.x380 - m.x393 - m.x406 + m.x432 == 23)
m.c109 = Constraint(expr= m.x43 + m.x368 - m.x381 - m.x394 - m.x407 + m.x433 == 7)
m.c110 = Constraint(expr= m.x44 + m.x369 - m.x382 - m.x395 - m.x408 + m.x434 == 5)
m.c111 = Constraint(expr= m.x45 + m.x370 - m.x383 - m.x396 - m.x409 + m.x435 == 15)
m.c112 = Constraint(expr= m.x46 + m.x371 - m.x384 - m.x397 - m.x410 + m.x436 == 7)
m.c113 = Constraint(expr= m.x47 + m.x372 - m.x385 - m.x398 - m.x411 + m.x437 == 10)
m.c114 = Constraint(expr= m.x48 + m.x373 - m.x386 - m.x399 - m.x412 + m.x438 == -179)
m.c115 = Constraint(expr= m.x49 + m.x374 - m.x387 - m.x400 - m.x413 + m.x439 == 20)
m.c116 = Constraint(expr= m.x50 + m.x375 - m.x388 - m.x401 - m.x414 + m.x440 == 18)
m.c117 = Constraint(expr= m.x51 + m.x376 - m.x389 - m.x402 - m.x415 + m.x441 == 8)
m.c118 = Constraint(expr= m.x52 + m.x377 - m.x390 - m.x403 - m.x416 + m.x442 == 12)
m.c119 = Constraint(expr= m.x313 + m.x404 - m.x417 - m.x430 - m.x443 + m.x521 == 9)
m.c120 = Constraint(expr= m.x314 + m.x405 - m.x418 - m.x431 - m.x444 + m.x522 == 12)
m.c121 = Constraint(expr= m.x315 + m.x406 - m.x419 - m.x432 - m.x445 + m.x523 == 24)
m.c122 = Constraint(expr= m.x316 + m.x407 - m.x420 - m.x433 - m.x446 + m.x524 == 21)
m.c123 = Constraint(expr= m.x317 + m.x408 - m.x421 - m.x434 - m.x447 + m.x525 == 8)
m.c124 = Constraint(expr= m.x318 + m.x409 - m.x422 - m.x435 - m.x448 + m.x526 == 9)
m.c125 = Constraint(expr= m.x319 + m.x410 - m.x423 - m.x436 - m.x449 + m.x527 == 11)
m.c126 = Constraint(expr= m.x320 + m.x411 - m.x424 - m.x437 - m.x450 + m.x528 == 13)
m.c127 = Constraint(expr= m.x321 + m.x412 - m.x425 - m.x438 - m.x451 + m.x529 == 11)
m.c128 = Constraint(expr= m.x322 + m.x413 - m.x426 - m.x439 - m.x452 + m.x530 == -183)
m.c129 = Constraint(expr= m.x323 + m.x414 - m.x427 - m.x440 - m.x453 + m.x531 == 16)
m.c130 = Constraint(expr= m.x324 + m.x415 - m.x428 - m.x441 - m.x454 + m.x532 == 14)
m.c131 = Constraint(expr= m.x325 + m.x416 - m.x429 - m.x442 - m.x455 + m.x533 == 17)
m.c132 = Constraint(expr= m.x183 + m.x339 - m.x456 - m.x469 - m.x482 + m.x495 == 22)
m.c133 = Constraint(expr= m.x184 + m.x340 - m.x457 - m.x470 - m.x483 + m.x496 == 12)
m.c134 = Constraint(expr= m.x185 + m.x341 - m.x458 - m.x471 - m.x484 + m.x497 == 7)
m.c135 = Constraint(expr= m.x186 + m.x342 - m.x459 - m.x472 - m.x485 + m.x498 == 12)
m.c136 = Constraint(expr= m.x187 + m.x343 - m.x460 - m.x473 - m.x486 + m.x499 == 12)
m.c137 = Constraint(expr= m.x188 + m.x344 - m.x461 - m.x474 - m.x487 + m.x500 == 10)
m.c138 = Constraint(expr= m.x189 + m.x345 - m.x462 - m.x475 - m.x488 + m.x501 == 11)
m.c139 = Constraint(expr= m.x190 + m.x346 - m.x463 - m.x476 - m.x489 + m.x502 == 17)
m.c140 = Constraint(expr= m.x191 + m.x347 - m.x464 - m.x477 - m.x490 + m.x503 == 17)
m.c141 = Constraint(expr= m.x192 + m.x348 - m.x465 - m.x478 - m.x491 + m.x504 == 12)
m.c142 = Constraint(expr= m.x193 + m.x349 - m.x466 - m.x479 - m.x492 + m.x505 == -185)
m.c143 = Constraint(expr= m.x194 + m.x350 - m.x467 - m.x480 - m.x493 + m.x506 == 10)
m.c144 = Constraint(expr= m.x195 + m.x351 - m.x468 - m.x481 - m.x494 + m.x507 == 21)
m.c145 = Constraint(expr= m.x482 - m.x495 - m.x508 + m.x534 == 8)
m.c146 = Constraint(expr= m.x483 - m.x496 - m.x509 + m.x535 == 20)
m.c147 = Constraint(expr= m.x484 - m.x497 - m.x510 + m.x536 == 23)
m.c148 = Constraint(expr= m.x485 - m.x498 - m.x511 + m.x537 == 18)
m.c149 = Constraint(expr= m.x486 - m.x499 - m.x512 + m.x538 == 15)
m.c150 = Constraint(expr= m.x487 - m.x500 - m.x513 + m.x539 == 22)
m.c151 = Constraint(expr= m.x488 - m.x501 - m.x514 + m.x540 == 17)
m.c152 = Constraint(expr= m.x489 - m.x502 - m.x515 + m.x541 == 24)
m.c153 = Constraint(expr= m.x490 - m.x503 - m.x516 + m.x542 == 7)
m.c154 = Constraint(expr= m.x491 - m.x504 - m.x517 + m.x543 == 16)
m.c155 = Constraint(expr= m.x492 - m.x505 - m.x518 + m.x544 == 24)
m.c156 = Constraint(expr= m.x493 - m.x506 - m.x519 + m.x545 == -200)
m.c157 = Constraint(expr= m.x494 - m.x507 - m.x520 + m.x546 == 8)
m.c158 = Constraint(expr= m.x443 + m.x508 - m.x521 - m.x534 == 19)
m.c159 = Constraint(expr= m.x444 + m.x509 - m.x522 - m.x535 == 15)
m.c160 = Constraint(expr= m.x445 + m.x510 - m.x523 - m.x536 == 10)
m.c161 = Constraint(expr= m.x446 + m.x511 - m.x524 - m.x537 == 13)
m.c162 = Constraint(expr= m.x447 + m.x512 - m.x525 - m.x538 == 11)
m.c163 = Constraint(expr= m.x448 + m.x513 - m.x526 - m.x539 == 8)
m.c164 = Constraint(expr= m.x449 + m.x514 - m.x527 - m.x540 == 13)
m.c165 = Constraint(expr= m.x450 + m.x515 - m.x528 - m.x541 == 23)
m.c166 = Constraint(expr= m.x451 + m.x516 - m.x529 - m.x542 == 23)
m.c167 = Constraint(expr= m.x452 + m.x517 - m.x530 - m.x543 == 14)
m.c168 = Constraint(expr= m.x453 + m.x518 - m.x531 - m.x544 == 8)
m.c169 = Constraint(expr= m.x454 + m.x519 - m.x532 - m.x545 == 25)
m.c170 = Constraint(expr= m.x455 + m.x520 - m.x533 - m.x546 == -157)
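# c171 - c212: each of x632 - x673 must be at least the sum of its 13-variable group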
m.c171 = Constraint(expr= - m.x1 - m.x2 - m.x3 - m.x4 - m.x5 - m.x6 - m.x7 - m.x8 - m.x9 - m.x10 - m.x11 - m.x12 - m.x13
+ m.x632 >= 0)
m.c172 = Constraint(expr= - m.x14 - m.x15 - m.x16 - m.x17 - m.x18 - m.x19 - m.x20 - m.x21 - m.x22 - m.x23 - m.x24
- m.x25 - m.x26 + m.x633 >= 0)
m.c173 = Constraint(expr= - m.x27 - m.x28 - m.x29 - m.x30 - m.x31 - m.x32 - m.x33 - m.x34 - m.x35 - m.x36 - m.x37
- m.x38 - m.x39 + m.x634 >= 0)
m.c174 = Constraint(expr= - m.x40 - m.x41 - m.x42 - m.x43 - m.x44 - m.x45 - m.x46 - m.x47 - m.x48 - m.x49 - m.x50
- m.x51 - m.x52 + m.x635 >= 0)
m.c175 = Constraint(expr= - m.x53 - m.x54 - m.x55 - m.x56 - m.x57 - m.x58 - m.x59 - m.x60 - m.x61 - m.x62 - m.x63
- m.x64 - m.x65 + m.x636 >= 0)
m.c176 = Constraint(expr= - m.x66 - m.x67 - m.x68 - m.x69 - m.x70 - m.x71 - m.x72 - m.x73 - m.x74 - m.x75 - m.x76
- m.x77 - m.x78 + m.x637 >= 0)
m.c177 = Constraint(expr= - m.x79 - m.x80 - m.x81 - m.x82 - m.x83 - m.x84 - m.x85 - m.x86 - m.x87 - m.x88 - m.x89
- m.x90 - m.x91 + m.x638 >= 0)
m.c178 = Constraint(expr= - m.x92 - m.x93 - m.x94 - m.x95 - m.x96 - m.x97 - m.x98 - m.x99 - m.x100 - m.x101 - m.x102
- m.x103 - m.x104 + m.x639 >= 0)
m.c179 = Constraint(expr= - m.x105 - m.x106 - m.x107 - m.x108 - m.x109 - m.x110 - m.x111 - m.x112 - m.x113 - m.x114
- m.x115 - m.x116 - m.x117 + m.x640 >= 0)
m.c180 = Constraint(expr= - m.x118 - m.x119 - m.x120 - m.x121 - m.x122 - m.x123 - m.x124 - m.x125 - m.x126 - m.x127
- m.x128 - m.x129 - m.x130 + m.x641 >= 0)
m.c181 = Constraint(expr= - m.x131 - m.x132 - m.x133 - m.x134 - m.x135 - m.x136 - m.x137 - m.x138 - m.x139 - m.x140
- m.x141 - m.x142 - m.x143 + m.x642 >= 0)
m.c182 = Constraint(expr= - m.x144 - m.x145 - m.x146 - m.x147 - m.x148 - m.x149 - m.x150 - m.x151 - m.x152 - m.x153
- m.x154 - m.x155 - m.x156 + m.x643 >= 0)
m.c183 = Constraint(expr= - m.x157 - m.x158 - m.x159 - m.x160 - m.x161 - m.x162 - m.x163 - m.x164 - m.x165 - m.x166
- m.x167 - m.x168 - m.x169 + m.x644 >= 0)
m.c184 = Constraint(expr= - m.x170 - m.x171 - m.x172 - m.x173 - m.x174 - m.x175 - m.x176 - m.x177 - m.x178 - m.x179
- m.x180 - m.x181 - m.x182 + m.x645 >= 0)
m.c185 = Constraint(expr= - m.x183 - m.x184 - m.x185 - m.x186 - m.x187 - m.x188 - m.x189 - m.x190 - m.x191 - m.x192
- m.x193 - m.x194 - m.x195 + m.x646 >= 0)
m.c186 = Constraint(expr= - m.x196 - m.x197 - m.x198 - m.x199 - m.x200 - m.x201 - m.x202 - m.x203 - m.x204 - m.x205
- m.x206 - m.x207 - m.x208 + m.x647 >= 0)
m.c187 = Constraint(expr= - m.x209 - m.x210 - m.x211 - m.x212 - m.x213 - m.x214 - m.x215 - m.x216 - m.x217 - m.x218
- m.x219 - m.x220 - m.x221 + m.x648 >= 0)
m.c188 = Constraint(expr= - m.x222 - m.x223 - m.x224 - m.x225 - m.x226 - m.x227 - m.x228 - m.x229 - m.x230 - m.x231
- m.x232 - m.x233 - m.x234 + m.x649 >= 0)
m.c189 = Constraint(expr= - m.x235 - m.x236 - m.x237 - m.x238 - m.x239 - m.x240 - m.x241 - m.x242 - m.x243 - m.x244
- m.x245 - m.x246 - m.x247 + m.x650 >= 0)
m.c190 = Constraint(expr= - m.x248 - m.x249 - m.x250 - m.x251 - m.x252 - m.x253 - m.x254 - m.x255 - m.x256 - m.x257
- m.x258 - m.x259 - m.x260 + m.x651 >= 0)
m.c191 = Constraint(expr= - m.x261 - m.x262 - m.x263 - m.x264 - m.x265 - m.x266 - m.x267 - m.x268 - m.x269 - m.x270
- m.x271 - m.x272 - m.x273 + m.x652 >= 0)
m.c192 = Constraint(expr= - m.x274 - m.x275 - m.x276 - m.x277 - m.x278 - m.x279 - m.x280 - m.x281 - m.x282 - m.x283
- m.x284 - m.x285 - m.x286 + m.x653 >= 0)
m.c193 = Constraint(expr= - m.x287 - m.x288 - m.x289 - m.x290 - m.x291 - m.x292 - m.x293 - m.x294 - m.x295 - m.x296
- m.x297 - m.x298 - m.x299 + m.x654 >= 0)
m.c194 = Constraint(expr= - m.x300 - m.x301 - m.x302 - m.x303 - m.x304 - m.x305 - m.x306 - m.x307 - m.x308 - m.x309
- m.x310 - m.x311 - m.x312 + m.x655 >= 0)
m.c195 = Constraint(expr= - m.x313 - m.x314 - m.x315 - m.x316 - m.x317 - m.x318 - m.x319 - m.x320 - m.x321 - m.x322
- m.x323 - m.x324 - m.x325 + m.x656 >= 0)
m.c196 = Constraint(expr= - m.x326 - m.x327 - m.x328 - m.x329 - m.x330 - m.x331 - m.x332 - m.x333 - m.x334 - m.x335
- m.x336 - m.x337 - m.x338 + m.x657 >= 0)
m.c197 = Constraint(expr= - m.x339 - m.x340 - m.x341 - m.x342 - m.x343 - m.x344 - m.x345 - m.x346 - m.x347 - m.x348
- m.x349 - m.x350 - m.x351 + m.x658 >= 0)
m.c198 = Constraint(expr= - m.x352 - m.x353 - m.x354 - m.x355 - m.x356 - m.x357 - m.x358 - m.x359 - m.x360 - m.x361
- m.x362 - m.x363 - m.x364 + m.x659 >= 0)
m.c199 = Constraint(expr= - m.x365 - m.x366 - m.x367 - m.x368 - m.x369 - m.x370 - m.x371 - m.x372 - m.x373 - m.x374
- m.x375 - m.x376 - m.x377 + m.x660 >= 0)
m.c200 = Constraint(expr= - m.x378 - m.x379 - m.x380 - m.x381 - m.x382 - m.x383 - m.x384 - m.x385 - m.x386 - m.x387
- m.x388 - m.x389 - m.x390 + m.x661 >= 0)
m.c201 = Constraint(expr= - m.x391 - m.x392 - m.x393 - m.x394 - m.x395 - m.x396 - m.x397 - m.x398 - m.x399 - m.x400
- m.x401 - m.x402 - m.x403 + m.x662 >= 0)
m.c202 = Constraint(expr= - m.x404 - m.x405 - m.x406 - m.x407 - m.x408 - m.x409 - m.x410 - m.x411 - m.x412 - m.x413
- m.x414 - m.x415 - m.x416 + m.x663 >= 0)
m.c203 = Constraint(expr= - m.x417 - m.x418 - m.x419 - m.x420 - m.x421 - m.x422 - m.x423 - m.x424 - m.x425 - m.x426
- m.x427 - m.x428 - m.x429 + m.x664 >= 0)
m.c204 = Constraint(expr= - m.x430 - m.x431 - m.x432 - m.x433 - m.x434 - m.x435 - m.x436 - m.x437 - m.x438 - m.x439
- m.x440 - m.x441 - m.x442 + m.x665 >= 0)
m.c205 = Constraint(expr= - m.x443 - m.x444 - m.x445 - m.x446 - m.x447 - m.x448 - m.x449 - m.x450 - m.x451 - m.x452
- m.x453 - m.x454 - m.x455 + m.x666 >= 0)
m.c206 = Constraint(expr= - m.x456 - m.x457 - m.x458 - m.x459 - m.x460 - m.x461 - m.x462 - m.x463 - m.x464 - m.x465
- m.x466 - m.x467 - m.x468 + m.x667 >= 0)
m.c207 = Constraint(expr= - m.x469 - m.x470 - m.x471 - m.x472 - m.x473 - m.x474 - m.x475 - m.x476 - m.x477 - m.x478
- m.x479 - m.x480 - m.x481 + m.x668 >= 0)
m.c208 = Constraint(expr= - m.x482 - m.x483 - m.x484 - m.x485 - m.x486 - m.x487 - m.x488 - m.x489 - m.x490 - m.x491
- m.x492 - m.x493 - m.x494 + m.x669 >= 0)
m.c209 = Constraint(expr= - m.x495 - m.x496 - m.x497 - m.x498 - m.x499 - m.x500 - m.x501 - m.x502 - m.x503 - m.x504
- m.x505 - m.x506 - m.x507 + m.x670 >= 0)
m.c210 = Constraint(expr= - m.x508 - m.x509 - m.x510 - m.x511 - m.x512 - m.x513 - m.x514 - m.x515 - m.x516 - m.x517
- m.x518 - m.x519 - m.x520 + m.x671 >= 0)
m.c211 = Constraint(expr= - m.x521 - m.x522 - m.x523 - m.x524 - m.x525 - m.x526 - m.x527 - m.x528 - m.x529 - m.x530
- m.x531 - m.x532 - m.x533 + m.x672 >= 0)
m.c212 = Constraint(expr= - m.x534 - m.x535 - m.x536 - m.x537 - m.x538 - m.x539 - m.x540 - m.x541 - m.x542 - m.x543
- m.x544 - m.x545 - m.x546 + m.x673 >= 0)
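# c213 - c254: nonconvex bilinear constraints coupling a group total (x632 - x673), its binary (b547 - b588), and an auxiliary variable (x589 - x630)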
m.c213 = Constraint(expr=166*m.x632*m.b547 - 166*m.b547*m.x589 + m.x632*m.x589 <= 0)
m.c214 = Constraint(expr=463*m.x633*m.b548 - 463*m.b548*m.x590 + m.x633*m.x590 <= 0)
m.c215 = Constraint(expr=522*m.x634*m.b549 - 522*m.b549*m.x591 + m.x634*m.x591 <= 0)
m.c216 = Constraint(expr=141*m.x635*m.b550 - 141*m.b550*m.x592 + m.x635*m.x592 <= 0)
m.c217 = Constraint(expr=166*m.x636*m.b551 - 166*m.b551*m.x593 + m.x636*m.x593 <= 0)
m.c218 = Constraint(expr=265*m.x637*m.b552 - 265*m.b552*m.x594 + m.x637*m.x594 <= 0)
m.c219 = Constraint(expr=463*m.x638*m.b553 - 463*m.b553*m.x595 + m.x638*m.x595 <= 0)
m.c220 = Constraint(expr=456*m.x639*m.b554 - 456*m.b554*m.x596 + m.x639*m.x596 <= 0)
m.c221 = Constraint(expr=526*m.x640*m.b555 - 526*m.b555*m.x597 + m.x640*m.x597 <= 0)
m.c222 = Constraint(expr=152*m.x641*m.b556 - 152*m.b556*m.x598 + m.x641*m.x598 <= 0)
m.c223 = Constraint(expr=456*m.x642*m.b557 - 456*m.b557*m.x599 + m.x642*m.x599 <= 0)
m.c224 = Constraint(expr=384*m.x643*m.b558 - 384*m.b558*m.x600 + m.x643*m.x600 <= 0)
m.c225 = Constraint(expr=441*m.x644*m.b559 - 441*m.b559*m.x601 + m.x644*m.x601 <= 0)
m.c226 = Constraint(expr=309*m.x645*m.b560 - 309*m.b560*m.x602 + m.x645*m.x602 <= 0)
m.c227 = Constraint(expr=233*m.x646*m.b561 - 233*m.b561*m.x603 + m.x646*m.x603 <= 0)
m.c228 = Constraint(expr=526*m.x647*m.b562 - 526*m.b562*m.x604 + m.x647*m.x604 <= 0)
m.c229 = Constraint(expr=384*m.x648*m.b563 - 384*m.b563*m.x605 + m.x648*m.x605 <= 0)
m.c230 = Constraint(expr=203*m.x649*m.b564 - 203*m.b564*m.x606 + m.x649*m.x606 <= 0)
m.c231 = Constraint(expr=522*m.x650*m.b565 - 522*m.b565*m.x607 + m.x650*m.x607 <= 0)
m.c232 = Constraint(expr=265*m.x651*m.b566 - 265*m.b566*m.x608 + m.x651*m.x608 <= 0)
m.c233 = Constraint(expr=152*m.x652*m.b567 - 152*m.b567*m.x609 + m.x652*m.x609 <= 0)
m.c234 = Constraint(expr=441*m.x653*m.b568 - 441*m.b568*m.x610 + m.x653*m.x610 <= 0)
m.c235 = Constraint(expr=203*m.x654*m.b569 - 203*m.b569*m.x611 + m.x654*m.x611 <= 0)
m.c236 = Constraint(expr=284*m.x655*m.b570 - 284*m.b570*m.x612 + m.x655*m.x612 <= 0)
m.c237 = Constraint(expr=426*m.x656*m.b571 - 426*m.b571*m.x613 + m.x656*m.x613 <= 0)
m.c238 = Constraint(expr=284*m.x657*m.b572 - 284*m.b572*m.x614 + m.x657*m.x614 <= 0)
m.c239 = Constraint(expr=109*m.x658*m.b573 - 109*m.b573*m.x615 + m.x658*m.x615 <= 0)
m.c240 = Constraint(expr=309*m.x659*m.b574 - 309*m.b574*m.x616 + m.x659*m.x616 <= 0)
m.c241 = Constraint(expr=434*m.x660*m.b575 - 434*m.b575*m.x617 + m.x660*m.x617 <= 0)
m.c242 = Constraint(expr=141*m.x661*m.b576 - 141*m.b576*m.x618 + m.x661*m.x618 <= 0)
m.c243 = Constraint(expr=434*m.x662*m.b577 - 434*m.b577*m.x619 + m.x662*m.x619 <= 0)
m.c244 = Constraint(expr=403*m.x663*m.b578 - 403*m.b578*m.x620 + m.x663*m.x620 <= 0)
m.c245 = Constraint(expr=426*m.x664*m.b579 - 426*m.b579*m.x621 + m.x664*m.x621 <= 0)
m.c246 = Constraint(expr=403*m.x665*m.b580 - 403*m.b580*m.x622 + m.x665*m.x622 <= 0)
m.c247 = Constraint(expr=151*m.x666*m.b581 - 151*m.b581*m.x623 + m.x666*m.x623 <= 0)
m.c248 = Constraint(expr=233*m.x667*m.b582 - 233*m.b582*m.x624 + m.x667*m.x624 <= 0)
m.c249 = Constraint(expr=109*m.x668*m.b583 - 109*m.b583*m.x625 + m.x668*m.x625 <= 0)
m.c250 = Constraint(expr=367*m.x669*m.b584 - 367*m.b584*m.x626 + m.x669*m.x626 <= 0)
m.c251 = Constraint(expr=367*m.x670*m.b585 - 367*m.b585*m.x627 + m.x670*m.x627 <= 0)
m.c252 = Constraint(expr=382*m.x671*m.b586 - 382*m.b586*m.x628 + m.x671*m.x628 <= 0)
m.c253 = Constraint(expr=151*m.x672*m.b587 - 151*m.b587*m.x629 + m.x672*m.x629 <= 0)
m.c254 = Constraint(expr=382*m.x673*m.b588 - 382*m.b588*m.x630 + m.x673*m.x630 <= 0)
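# c255: a single budget constraint capping the sum of the auxiliary variables x589 - x630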
m.c255 = Constraint(expr= m.x589 + m.x590 + m.x591 + m.x592 + m.x593 + m.x594 + m.x595 + m.x596 + m.x597 + m.x598
+ m.x599 + m.x600 + m.x601 + m.x602 + m.x603 + m.x604 + m.x605 + m.x606 + m.x607 + m.x608
+ m.x609 + m.x610 + m.x611 + m.x612 + m.x613 + m.x614 + m.x615 + m.x616 + m.x617 + m.x618
+ m.x619 + m.x620 + m.x621 + m.x622 + m.x623 + m.x624 + m.x625 + m.x626 + m.x627 + m.x628
+ m.x629 + m.x630 <= 18536)
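# c256 - c297: big-M switching constraints; each group's variables can be nonzero only when its binary is 1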
m.c256 = Constraint(expr= m.x1 + m.x2 + m.x3 + m.x4 + m.x5 + m.x6 + m.x7 + m.x8 + m.x9 + m.x10 + m.x11 + m.x12 + m.x13
- 166*m.b547 <= 0)
m.c257 = Constraint(expr= m.x14 + m.x15 + m.x16 + m.x17 + m.x18 + m.x19 + m.x20 + m.x21 + m.x22 + m.x23 + m.x24
+ m.x25 + m.x26 - 463*m.b548 <= 0)
m.c258 = Constraint(expr= m.x27 + m.x28 + m.x29 + m.x30 + m.x31 + m.x32 + m.x33 + m.x34 + m.x35 + m.x36 + m.x37
+ m.x38 + m.x39 - 522*m.b549 <= 0)
m.c259 = Constraint(expr= m.x40 + m.x41 + m.x42 + m.x43 + m.x44 + m.x45 + m.x46 + m.x47 + m.x48 + m.x49 + m.x50
+ m.x51 + m.x52 - 141*m.b550 <= 0)
m.c260 = Constraint(expr= m.x53 + m.x54 + m.x55 + m.x56 + m.x57 + m.x58 + m.x59 + m.x60 + m.x61 + m.x62 + m.x63
+ m.x64 + m.x65 - 166*m.b551 <= 0)
m.c261 = Constraint(expr= m.x66 + m.x67 + m.x68 + m.x69 + m.x70 + m.x71 + m.x72 + m.x73 + m.x74 + m.x75 + m.x76
+ m.x77 + m.x78 - 265*m.b552 <= 0)
m.c262 = Constraint(expr= m.x79 + m.x80 + m.x81 + m.x82 + m.x83 + m.x84 + m.x85 + m.x86 + m.x87 + m.x88 + m.x89
+ m.x90 + m.x91 - 463*m.b553 <= 0)
m.c263 = Constraint(expr= m.x92 + m.x93 + m.x94 + m.x95 + m.x96 + m.x97 + m.x98 + m.x99 + m.x100 + m.x101 + m.x102
+ m.x103 + m.x104 - 456*m.b554 <= 0)
m.c264 = Constraint(expr= m.x105 + m.x106 + m.x107 + m.x108 + m.x109 + m.x110 + m.x111 + m.x112 + m.x113 + m.x114
+ m.x115 + m.x116 + m.x117 - 526*m.b555 <= 0)
m.c265 = Constraint(expr= m.x118 + m.x119 + m.x120 + m.x121 + m.x122 + m.x123 + m.x124 + m.x125 + m.x126 + m.x127
+ m.x128 + m.x129 + m.x130 - 152*m.b556 <= 0)
m.c266 = Constraint(expr= m.x131 + m.x132 + m.x133 + m.x134 + m.x135 + m.x136 + m.x137 + m.x138 + m.x139 + m.x140
+ m.x141 + m.x142 + m.x143 - 456*m.b557 <= 0)
m.c267 = Constraint(expr= m.x144 + m.x145 + m.x146 + m.x147 + m.x148 + m.x149 + m.x150 + m.x151 + m.x152 + m.x153
+ m.x154 + m.x155 + m.x156 - 384*m.b558 <= 0)
m.c268 = Constraint(expr= m.x157 + m.x158 + m.x159 + m.x160 + m.x161 + m.x162 + m.x163 + m.x164 + m.x165 + m.x166
+ m.x167 + m.x168 + m.x169 - 441*m.b559 <= 0)
m.c269 = Constraint(expr= m.x170 + m.x171 + m.x172 + m.x173 + m.x174 + m.x175 + m.x176 + m.x177 + m.x178 + m.x179
+ m.x180 + m.x181 + m.x182 - 309*m.b560 <= 0)
m.c270 = Constraint(expr= m.x183 + m.x184 + m.x185 + m.x186 + m.x187 + m.x188 + m.x189 + m.x190 + m.x191 + m.x192
+ m.x193 + m.x194 + m.x195 - 233*m.b561 <= 0)
m.c271 = Constraint(expr= m.x196 + m.x197 + m.x198 + m.x199 + m.x200 + m.x201 + m.x202 + m.x203 + m.x204 + m.x205
+ m.x206 + m.x207 + m.x208 - 526*m.b562 <= 0)
m.c272 = Constraint(expr= m.x209 + m.x210 + m.x211 + m.x212 + m.x213 + m.x214 + m.x215 + m.x216 + m.x217 + m.x218
+ m.x219 + m.x220 + m.x221 - 384*m.b563 <= 0)
m.c273 = Constraint(expr= m.x222 + m.x223 + m.x224 + m.x225 + m.x226 + m.x227 + m.x228 + m.x229 + m.x230 + m.x231
+ m.x232 + m.x233 + m.x234 - 203*m.b564 <= 0)
m.c274 = Constraint(expr= m.x235 + m.x236 + m.x237 + m.x238 + m.x239 + m.x240 + m.x241 + m.x242 + m.x243 + m.x244
+ m.x245 + m.x246 + m.x247 - 522*m.b565 <= 0)
m.c275 = Constraint(expr= m.x248 + m.x249 + m.x250 + m.x251 + m.x252 + m.x253 + m.x254 + m.x255 + m.x256 + m.x257
+ m.x258 + m.x259 + m.x260 - 265*m.b566 <= 0)
m.c276 = Constraint(expr= m.x261 + m.x262 + m.x263 + m.x264 + m.x265 + m.x266 + m.x267 + m.x268 + m.x269 + m.x270
+ m.x271 + m.x272 + m.x273 - 152*m.b567 <= 0)
m.c277 = Constraint(expr= m.x274 + m.x275 + m.x276 + m.x277 + m.x278 + m.x279 + m.x280 + m.x281 + m.x282 + m.x283
+ m.x284 + m.x285 + m.x286 - 441*m.b568 <= 0)
m.c278 = Constraint(expr= m.x287 + m.x288 + m.x289 + m.x290 + m.x291 + m.x292 + m.x293 + m.x294 + m.x295 + m.x296
+ m.x297 + m.x298 + m.x299 - 203*m.b569 <= 0)
m.c279 = Constraint(expr= m.x300 + m.x301 + m.x302 + m.x303 + m.x304 + m.x305 + m.x306 + m.x307 + m.x308 + m.x309
+ m.x310 + m.x311 + m.x312 - 284*m.b570 <= 0)
m.c280 = Constraint(expr= m.x313 + m.x314 + m.x315 + m.x316 + m.x317 + m.x318 + m.x319 + m.x320 + m.x321 + m.x322
+ m.x323 + m.x324 + m.x325 - 426*m.b571 <= 0)
m.c281 = Constraint(expr= m.x326 + m.x327 + m.x328 + m.x329 + m.x330 + m.x331 + m.x332 + m.x333 + m.x334 + m.x335
+ m.x336 + m.x337 + m.x338 - 284*m.b572 <= 0)
m.c282 = Constraint(expr= m.x339 + m.x340 + m.x341 + m.x342 + m.x343 + m.x344 + m.x345 + m.x346 + m.x347 + m.x348
+ m.x349 + m.x350 + m.x351 - 109*m.b573 <= 0)
m.c283 = Constraint(expr= m.x352 + m.x353 + m.x354 + m.x355 + m.x356 + m.x357 + m.x358 + m.x359 + m.x360 + m.x361
+ m.x362 + m.x363 + m.x364 - 309*m.b574 <= 0)
m.c284 = Constraint(expr= m.x365 + m.x366 + m.x367 + m.x368 + m.x369 + m.x370 + m.x371 + m.x372 + m.x373 + m.x374
+ m.x375 + m.x376 + m.x377 - 434*m.b575 <= 0)
m.c285 = Constraint(expr= m.x378 + m.x379 + m.x380 + m.x381 + m.x382 + m.x383 + m.x384 + m.x385 + m.x386 + m.x387
+ m.x388 + m.x389 + m.x390 - 141*m.b576 <= 0)
m.c286 = Constraint(expr= m.x391 + m.x392 + m.x393 + m.x394 + m.x395 + m.x396 + m.x397 + m.x398 + m.x399 + m.x400
+ m.x401 + m.x402 + m.x403 - 434*m.b577 <= 0)
m.c287 = Constraint(expr= m.x404 + m.x405 + m.x406 + m.x407 + m.x408 + m.x409 + m.x410 + m.x411 + m.x412 + m.x413
+ m.x414 + m.x415 + m.x416 - 403*m.b578 <= 0)
m.c288 = Constraint(expr= m.x417 + m.x418 + m.x419 + m.x420 + m.x421 + m.x422 + m.x423 + m.x424 + m.x425 + m.x426
+ m.x427 + m.x428 + m.x429 - 426*m.b579 <= 0)
m.c289 = Constraint(expr= m.x430 + m.x431 + m.x432 + m.x433 + m.x434 + m.x435 + m.x436 + m.x437 + m.x438 + m.x439
+ m.x440 + m.x441 + m.x442 - 403*m.b580 <= 0)
m.c290 = Constraint(expr= m.x443 + m.x444 + m.x445 + m.x446 + m.x447 + m.x448 + m.x449 + m.x450 + m.x451 + m.x452
+ m.x453 + m.x454 + m.x455 - 151*m.b581 <= 0)
m.c291 = Constraint(expr= m.x456 + m.x457 + m.x458 + m.x459 + m.x460 + m.x461 + m.x462 + m.x463 + m.x464 + m.x465
+ m.x466 + m.x467 + m.x468 - 233*m.b582 <= 0)
m.c292 = Constraint(expr= m.x469 + m.x470 + m.x471 + m.x472 + m.x473 + m.x474 + m.x475 + m.x476 + m.x477 + m.x478
+ m.x479 + m.x480 + m.x481 - 109*m.b583 <= 0)
m.c293 = Constraint(expr= m.x482 + m.x483 + m.x484 + m.x485 + m.x486 + m.x487 + m.x488 + m.x489 + m.x490 + m.x491
+ m.x492 + m.x493 + m.x494 - 367*m.b584 <= 0)
m.c294 = Constraint(expr= m.x495 + m.x496 + m.x497 + m.x498 + m.x499 + m.x500 + m.x501 + m.x502 + m.x503 + m.x504
+ m.x505 + m.x506 + m.x507 - 367*m.b585 <= 0)
m.c295 = Constraint(expr= m.x508 + m.x509 + m.x510 + m.x511 + m.x512 + m.x513 + m.x514 + m.x515 + m.x516 + m.x517
+ m.x518 + m.x519 + m.x520 - 382*m.b586 <= 0)
m.c296 = Constraint(expr= m.x521 + m.x522 + m.x523 + m.x524 + m.x525 + m.x526 + m.x527 + m.x528 + m.x529 + m.x530
+ m.x531 + m.x532 + m.x533 - 151*m.b587 <= 0)
m.c297 = Constraint(expr= m.x534 + m.x535 + m.x536 + m.x537 + m.x538 + m.x539 + m.x540 + m.x541 + m.x542 + m.x543
+ m.x544 + m.x545 + m.x546 - 382*m.b588 <= 0) | en | 0.748015 | # MINLP written by GAMS Convert at 08/20/20 01:30:45 # # Equation counts # Total E G L N X C B # 297 170 42 85 0 0 0 0 # # Variable counts # x b i s1s s2s sc si # Total cont binary integer sos1 sos2 scont sint # 673 631 42 0 0 0 0 0 # FX 0 0 0 0 0 0 0 0 # # Nonzero counts # Total const NL DLL # 2479 2353 126 0 # # Reformulation has removed 1 variable and 1 equation | 1.315837 | 1 |
tests/pytests/scenarios/multimaster/conftest.py | lllamnyp/salt | 0 | 10416 | <gh_stars>0
import logging
import os
import shutil
import subprocess
import pytest
import salt.utils.platform
log = logging.getLogger(__name__)
@pytest.fixture(scope="package", autouse=True)
def skip_on_tcp_transport(request):
if request.config.getoption("--transport") == "tcp":
pytest.skip("Multimaster under the TPC transport is not working. See #59053")
@pytest.fixture(scope="package")
def salt_mm_master_1(request, salt_factories):
config_defaults = {
"open_mode": True,
"transport": request.config.getoption("--transport"),
}
config_overrides = {
"interface": "127.0.0.1",
}
factory = salt_factories.salt_master_daemon(
"mm-master-1",
defaults=config_defaults,
overrides=config_overrides,
extra_cli_arguments_after_first_start_failure=["--log-level=debug"],
)
with factory.started(start_timeout=120):
yield factory
@pytest.fixture(scope="package")
def mm_master_1_salt_cli(salt_mm_master_1):
return salt_mm_master_1.get_salt_cli(timeout=120)
@pytest.fixture(scope="package")
def salt_mm_master_2(salt_factories, salt_mm_master_1):
if salt.utils.platform.is_darwin() or salt.utils.platform.is_freebsd():
subprocess.check_output(["ifconfig", "lo0", "alias", "127.0.0.2", "up"])
config_defaults = {
"open_mode": True,
"transport": salt_mm_master_1.config["transport"],
}
config_overrides = {
"interface": "127.0.0.2",
}
# Use the same ports for both masters, they are binding to different interfaces
for key in (
"ret_port",
"publish_port",
):
config_overrides[key] = salt_mm_master_1.config[key]
factory = salt_factories.salt_master_daemon(
"mm-master-2",
defaults=config_defaults,
overrides=config_overrides,
extra_cli_arguments_after_first_start_failure=["--log-level=debug"],
)
    # The secondary salt master depends on the primary salt master fixture
# because we need to clone the keys
for keyfile in ("master.pem", "master.pub"):
shutil.copyfile(
os.path.join(salt_mm_master_1.config["pki_dir"], keyfile),
os.path.join(factory.config["pki_dir"], keyfile),
)
with factory.started(start_timeout=120):
yield factory
@pytest.fixture(scope="package")
def mm_master_2_salt_cli(salt_mm_master_2):
return salt_mm_master_2.get_salt_cli(timeout=120)
@pytest.fixture(scope="package")
def salt_mm_minion_1(salt_mm_master_1, salt_mm_master_2):
config_defaults = {
"transport": salt_mm_master_1.config["transport"],
}
mm_master_1_port = salt_mm_master_1.config["ret_port"]
mm_master_1_addr = salt_mm_master_1.config["interface"]
mm_master_2_port = salt_mm_master_2.config["ret_port"]
mm_master_2_addr = salt_mm_master_2.config["interface"]
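    # the minion is configured with both masters so multimaster behaviour is exercised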
config_overrides = {
"master": [
"{}:{}".format(mm_master_1_addr, mm_master_1_port),
"{}:{}".format(mm_master_2_addr, mm_master_2_port),
],
"test.foo": "baz",
}
factory = salt_mm_master_1.salt_minion_daemon(
"mm-minion-1",
defaults=config_defaults,
overrides=config_overrides,
extra_cli_arguments_after_first_start_failure=["--log-level=debug"],
)
with factory.started(start_timeout=120):
yield factory
@pytest.fixture(scope="package")
def salt_mm_minion_2(salt_mm_master_1, salt_mm_master_2):
config_defaults = {
"transport": salt_mm_master_1.config["transport"],
}
mm_master_1_port = salt_mm_master_1.config["ret_port"]
mm_master_1_addr = salt_mm_master_1.config["interface"]
mm_master_2_port = salt_mm_master_2.config["ret_port"]
mm_master_2_addr = salt_mm_master_2.config["interface"]
config_overrides = {
"master": [
"{}:{}".format(mm_master_1_addr, mm_master_1_port),
"{}:{}".format(mm_master_2_addr, mm_master_2_port),
],
"test.foo": "baz",
}
factory = salt_mm_master_2.salt_minion_daemon(
"mm-minion-2",
defaults=config_defaults,
overrides=config_overrides,
extra_cli_arguments_after_first_start_failure=["--log-level=debug"],
)
with factory.started(start_timeout=120):
yield factory
| import logging
import os
import shutil
import subprocess
import pytest
import salt.utils.platform
log = logging.getLogger(__name__)
@pytest.fixture(scope="package", autouse=True)
def skip_on_tcp_transport(request):
if request.config.getoption("--transport") == "tcp":
pytest.skip("Multimaster under the TPC transport is not working. See #59053")
@pytest.fixture(scope="package")
def salt_mm_master_1(request, salt_factories):
config_defaults = {
"open_mode": True,
"transport": request.config.getoption("--transport"),
}
config_overrides = {
"interface": "127.0.0.1",
}
factory = salt_factories.salt_master_daemon(
"mm-master-1",
defaults=config_defaults,
overrides=config_overrides,
extra_cli_arguments_after_first_start_failure=["--log-level=debug"],
)
with factory.started(start_timeout=120):
yield factory
@pytest.fixture(scope="package")
def mm_master_1_salt_cli(salt_mm_master_1):
return salt_mm_master_1.get_salt_cli(timeout=120)
@pytest.fixture(scope="package")
def salt_mm_master_2(salt_factories, salt_mm_master_1):
if salt.utils.platform.is_darwin() or salt.utils.platform.is_freebsd():
subprocess.check_output(["ifconfig", "lo0", "alias", "127.0.0.2", "up"])
config_defaults = {
"open_mode": True,
"transport": salt_mm_master_1.config["transport"],
}
config_overrides = {
"interface": "127.0.0.2",
}
# Use the same ports for both masters, they are binding to different interfaces
for key in (
"ret_port",
"publish_port",
):
config_overrides[key] = salt_mm_master_1.config[key]
factory = salt_factories.salt_master_daemon(
"mm-master-2",
defaults=config_defaults,
overrides=config_overrides,
extra_cli_arguments_after_first_start_failure=["--log-level=debug"],
)
    # The secondary salt master depends on the primary salt master fixture
# because we need to clone the keys
for keyfile in ("master.pem", "master.pub"):
shutil.copyfile(
os.path.join(salt_mm_master_1.config["pki_dir"], keyfile),
os.path.join(factory.config["pki_dir"], keyfile),
)
with factory.started(start_timeout=120):
yield factory
@pytest.fixture(scope="package")
def mm_master_2_salt_cli(salt_mm_master_2):
return salt_mm_master_2.get_salt_cli(timeout=120)
@pytest.fixture(scope="package")
def salt_mm_minion_1(salt_mm_master_1, salt_mm_master_2):
config_defaults = {
"transport": salt_mm_master_1.config["transport"],
}
mm_master_1_port = salt_mm_master_1.config["ret_port"]
mm_master_1_addr = salt_mm_master_1.config["interface"]
mm_master_2_port = salt_mm_master_2.config["ret_port"]
mm_master_2_addr = salt_mm_master_2.config["interface"]
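    # the minion is configured with both masters so multimaster behaviour is exercised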
config_overrides = {
"master": [
"{}:{}".format(mm_master_1_addr, mm_master_1_port),
"{}:{}".format(mm_master_2_addr, mm_master_2_port),
],
"test.foo": "baz",
}
factory = salt_mm_master_1.salt_minion_daemon(
"mm-minion-1",
defaults=config_defaults,
overrides=config_overrides,
extra_cli_arguments_after_first_start_failure=["--log-level=debug"],
)
with factory.started(start_timeout=120):
yield factory
@pytest.fixture(scope="package")
def salt_mm_minion_2(salt_mm_master_1, salt_mm_master_2):
config_defaults = {
"transport": salt_mm_master_1.config["transport"],
}
mm_master_1_port = salt_mm_master_1.config["ret_port"]
mm_master_1_addr = salt_mm_master_1.config["interface"]
mm_master_2_port = salt_mm_master_2.config["ret_port"]
mm_master_2_addr = salt_mm_master_2.config["interface"]
config_overrides = {
"master": [
"{}:{}".format(mm_master_1_addr, mm_master_1_port),
"{}:{}".format(mm_master_2_addr, mm_master_2_port),
],
"test.foo": "baz",
}
factory = salt_mm_master_2.salt_minion_daemon(
"mm-minion-2",
defaults=config_defaults,
overrides=config_overrides,
extra_cli_arguments_after_first_start_failure=["--log-level=debug"],
)
with factory.started(start_timeout=120):
yield factory | en | 0.891435 | #59053") # Use the same ports for both masters, they are binding to different interfaces # The secondary salt master depends on the primarily salt master fixture # because we need to clone the keys | 1.835832 | 2 |
supermario/supermario 1117/start_state.py | Kimmiryeong/2DGP_GameProject | 0 | 10417 | import game_framework
from pico2d import *
import title_state
name = "StartState"
image = None
logo_time = 0.0
def enter():
global image
image = load_image('kpu_credit.png')
def exit():
global image
del(image)
def update():
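    # advance the splash timer; once it exceeds 1.0, switch to the title screen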
global logo_time
if (logo_time > 1.0):
logo_time = 0.8
game_framework.change_state(title_state)
delay(0.01)
    logo_time += 0.05
def draw():
global image
clear_canvas()
image.draw(400,300)
update_canvas()
def handle_events():
events = get_events()
pass
def pause(): pass
def resume(): pass
| import game_framework
from pico2d import *
import title_state
name = "StartState"
image = None
logo_time = 0.0
def enter():
global image
image = load_image('kpu_credit.png')
def exit():
global image
del(image)
def update():
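    # advance the splash timer; once it exceeds 1.0, switch to the title screen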
global logo_time
if (logo_time > 1.0):
logo_time = 0.8
game_framework.change_state(title_state)
delay(0.01)
    logo_time += 0.05
def draw():
global image
clear_canvas()
image.draw(400,300)
update_canvas()
def handle_events():
events = get_events()
pass
def pause(): pass
def resume(): pass
| none | 1 | 2.843141 | 3 |
|
egs/librispeech/ASR/transducer/test_rnn.py | rosrad/icefall | 0 | 10418 | <reponame>rosrad/icefall<gh_stars>0
#!/usr/bin/env python3
# Copyright 2021 Xiaomi Corp. (authors: <NAME>)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from transducer.rnn import (
LayerNormGRU,
LayerNormGRUCell,
LayerNormGRULayer,
LayerNormLSTM,
LayerNormLSTMCell,
LayerNormLSTMLayer,
)
def get_devices():
devices = [torch.device("cpu")]
if torch.cuda.is_available():
devices.append(torch.device("cuda", 0))
return devices
def assert_allclose(a: torch.Tensor, b: torch.Tensor, atol=1e-6, **kwargs):
assert torch.allclose(
a, b, atol=atol, **kwargs
), f"{(a - b).abs().max()}, {a.numel()}"
def test_layernorm_lstm_cell_jit(device="cpu"):
input_size = 10
hidden_size = 20
bias = torch.randint(low=0, high=1000, size=(1,)).item() & 2 == 0
cell = LayerNormLSTMCell(
input_size=input_size,
hidden_size=hidden_size,
bias=bias,
device=device,
)
torch.jit.script(cell)
def test_layernorm_lstm_cell_constructor(device="cpu"):
input_size = torch.randint(low=2, high=100, size=(1,)).item()
hidden_size = torch.randint(low=2, high=100, size=(1,)).item()
self_cell = LayerNormLSTMCell(
input_size,
hidden_size,
ln=nn.Identity,
device=device,
)
torch_cell = nn.LSTMCell(
input_size,
hidden_size,
).to(device)
for name, param in self_cell.named_parameters():
assert param.shape == getattr(torch_cell, name).shape
assert len(self_cell.state_dict()) == len(torch_cell.state_dict())
def test_layernorm_lstm_cell_with_projection_jit(device="cpu"):
input_size = 10
hidden_size = 20
proj_size = 5
self_cell = LayerNormLSTMCell(
input_size,
hidden_size,
proj_size=proj_size,
device=device,
)
torch.jit.script(self_cell)
def test_layernorm_lstm_cell_forward(device="cpu"):
input_size = torch.randint(low=2, high=100, size=(1,)).item()
hidden_size = torch.randint(low=2, high=100, size=(1,)).item()
bias = torch.randint(low=0, high=1000, size=(1,)).item() & 2 == 0
self_cell = LayerNormLSTMCell(
input_size,
hidden_size,
bias=bias,
ln=nn.Identity,
device=device,
)
torch_cell = nn.LSTMCell(
input_size,
hidden_size,
bias=bias,
).to(device)
with torch.no_grad():
for name, torch_param in torch_cell.named_parameters():
self_param = getattr(self_cell, name)
torch_param.copy_(self_param)
N = torch.randint(low=2, high=100, size=(1,))
x = torch.rand(N, input_size, device=device).requires_grad_()
h = torch.rand(N, hidden_size, device=device)
c = torch.rand(N, hidden_size, device=device)
x_clone = x.detach().clone().requires_grad_()
self_h, self_c = self_cell(x.clone(), (h, c))
torch_h, torch_c = torch_cell(x_clone, (h, c))
assert_allclose(self_h, torch_h)
assert_allclose(self_c, torch_c)
self_hc = self_h * self_c
torch_hc = torch_h * torch_c
(
self_hc.reshape(-1) * torch.arange(self_hc.numel(), device=device)
).sum().backward()
(
torch_hc.reshape(-1) * torch.arange(torch_hc.numel(), device=device)
).sum().backward()
assert_allclose(x.grad, x_clone.grad, atol=1e-3)
def test_layernorm_lstm_cell_with_projection_forward(device="cpu"):
input_size = torch.randint(low=2, high=100, size=(1,)).item()
hidden_size = torch.randint(low=10, high=100, size=(1,)).item()
bias = torch.randint(low=0, high=1000, size=(1,)).item() & 2 == 0
proj_size = torch.randint(low=2, high=hidden_size, size=(1,)).item()
self_cell = LayerNormLSTMCell(
input_size,
hidden_size,
bias=bias,
ln=nn.Identity,
proj_size=proj_size,
device=device,
)
torch_cell = nn.LSTM(
input_size,
hidden_size,
bias=bias,
proj_size=proj_size,
batch_first=True,
).to(device)
with torch.no_grad():
for name, self_param in self_cell.named_parameters():
getattr(torch_cell, f"{name}_l0").copy_(self_param)
N = torch.randint(low=2, high=100, size=(1,))
x = torch.rand(N, input_size, device=device).requires_grad_()
h = torch.rand(N, proj_size, device=device)
c = torch.rand(N, hidden_size, device=device)
x_clone = x.detach().clone().requires_grad_()
self_h, self_c = self_cell(x.clone(), (h, c))
_, (torch_h, torch_c) = torch_cell(
x_clone.unsqueeze(1), (h.unsqueeze(0), c.unsqueeze(0))
)
torch_h = torch_h.squeeze(0)
torch_c = torch_c.squeeze(0)
assert_allclose(self_h, torch_h)
assert_allclose(self_c, torch_c)
(self_h.sum() * self_c.sum()).backward()
(torch_h.sum() * torch_c.sum()).backward()
assert_allclose(x.grad, x_clone.grad, atol=1e-5)
def test_layernorm_lstm_layer_jit(device="cpu"):
input_size = 10
hidden_size = 20
layer = LayerNormLSTMLayer(
input_size,
hidden_size=hidden_size,
device=device,
)
torch.jit.script(layer)
def test_layernorm_lstm_layer_with_project_jit(device="cpu"):
input_size = 10
hidden_size = 20
proj_size = 5
layer = LayerNormLSTMLayer(
input_size,
hidden_size=hidden_size,
proj_size=proj_size,
device=device,
)
torch.jit.script(layer)
def test_layernorm_lstm_layer_with_projection_forward(device="cpu"):
input_size = torch.randint(low=2, high=100, size=(1,)).item()
hidden_size = torch.randint(low=10, high=100, size=(1,)).item()
bias = torch.randint(low=0, high=1000, size=(1,)).item() & 2 == 0
proj_size = torch.randint(low=2, high=hidden_size, size=(1,)).item()
self_layer = LayerNormLSTMLayer(
input_size,
hidden_size,
bias=bias,
proj_size=proj_size,
ln=nn.Identity,
device=device,
)
N = torch.randint(low=2, high=100, size=(1,))
T = torch.randint(low=2, high=100, size=(1,))
x = torch.rand(N, T, input_size, device=device).requires_grad_()
h = torch.rand(N, proj_size, device=device)
c = torch.rand(N, hidden_size, device=device)
x_clone = x.detach().clone().requires_grad_()
self_y, (self_h, self_c) = self_layer(x, (h, c))
torch_layer = nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=1,
bias=bias,
proj_size=proj_size,
batch_first=True,
dropout=0,
bidirectional=False,
).to(device)
with torch.no_grad():
for name, self_param in self_layer.cell.named_parameters():
getattr(torch_layer, f"{name}_l0").copy_(self_param)
torch_y, (torch_h, torch_c) = torch_layer(
x_clone, (h.unsqueeze(0), c.unsqueeze(0))
)
assert_allclose(self_y, torch_y)
assert_allclose(self_h, torch_h)
assert_allclose(self_c, torch_c)
self_y.sum().backward()
torch_y.sum().backward()
assert_allclose(x.grad, x_clone.grad, atol=1e-5)
def test_layernorm_lstm_layer_forward(device="cpu"):
input_size = torch.randint(low=2, high=100, size=(1,)).item()
hidden_size = torch.randint(low=2, high=100, size=(1,)).item()
bias = torch.randint(low=0, high=1000, size=(1,)).item() & 2 == 0
self_layer = LayerNormLSTMLayer(
input_size,
hidden_size,
bias=bias,
ln=nn.Identity,
device=device,
)
N = torch.randint(low=2, high=100, size=(1,))
T = torch.randint(low=2, high=100, size=(1,))
x = torch.rand(N, T, input_size, device=device).requires_grad_()
h = torch.rand(N, hidden_size, device=device)
c = torch.rand(N, hidden_size, device=device)
x_clone = x.detach().clone().requires_grad_()
self_y, (self_h, self_c) = self_layer(x, (h, c))
torch_layer = nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=1,
bias=bias,
batch_first=True,
dropout=0,
bidirectional=False,
).to(device)
with torch.no_grad():
for name, self_param in self_layer.cell.named_parameters():
getattr(torch_layer, f"{name}_l0").copy_(self_param)
torch_y, (torch_h, torch_c) = torch_layer(
x_clone, (h.unsqueeze(0), c.unsqueeze(0))
)
assert_allclose(self_y, torch_y)
assert_allclose(self_h, torch_h)
assert_allclose(self_c, torch_c)
self_hc = self_h * self_c
torch_hc = torch_h * torch_c
self_hc_sum = (
self_hc.reshape(-1) * torch.arange(self_hc.numel(), device=device)
).sum()
torch_hc_sum = (
torch_hc.reshape(-1) * torch.arange(torch_hc.numel(), device=device)
).sum()
self_y_sum = (
self_y.reshape(-1) * torch.arange(self_y.numel(), device=device)
).sum()
torch_y_sum = (
torch_y.reshape(-1) * torch.arange(torch_y.numel(), device=device)
).sum()
(self_hc_sum + self_y_sum).backward()
(torch_hc_sum + torch_y_sum).backward()
assert_allclose(x.grad, x_clone.grad, atol=0.1)
def test_layernorm_lstm_jit(device="cpu"):
input_size = 2
hidden_size = 3
num_layers = 4
bias = True
lstm = LayerNormLSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
ln=nn.Identity,
device=device,
)
torch.jit.script(lstm)
def test_layernorm_lstm_with_projection_jit(device="cpu"):
input_size = 2
hidden_size = 5
proj_size = 3
num_layers = 4
bias = True
lstm = LayerNormLSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
proj_size=proj_size,
ln=nn.Identity,
device=device,
)
torch.jit.script(lstm)
def test_layernorm_lstm_forward(device="cpu"):
input_size = torch.randint(low=2, high=100, size=(1,)).item()
hidden_size = torch.randint(low=2, high=100, size=(1,)).item()
num_layers = torch.randint(low=2, high=100, size=(1,)).item()
bias = torch.randint(low=0, high=1000, size=(1,)).item() & 2 == 0
self_lstm = LayerNormLSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
ln=nn.Identity,
device=device,
)
torch_lstm = nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=True,
bidirectional=False,
).to(device)
assert len(self_lstm.state_dict()) == len(torch_lstm.state_dict())
with torch.no_grad():
for name, param in self_lstm.named_parameters():
# name has the form layers.0.cell.weight_hh
parts = name.split(".")
layer_num = parts[1]
getattr(torch_lstm, f"{parts[-1]}_l{layer_num}").copy_(param)
N = torch.randint(low=2, high=100, size=(1,))
T = torch.randint(low=2, high=100, size=(1,))
x = torch.rand(N, T, input_size, device=device).requires_grad_()
hs = [torch.rand(N, hidden_size, device=device) for _ in range(num_layers)]
cs = [torch.rand(N, hidden_size, device=device) for _ in range(num_layers)]
states = list(zip(hs, cs))
x_clone = x.detach().clone().requires_grad_()
self_y, self_states = self_lstm(x, states)
h = torch.stack(hs)
c = torch.stack(cs)
torch_y, (torch_h, torch_c) = torch_lstm(x_clone, (h, c))
assert_allclose(self_y, torch_y)
self_h = torch.stack([s[0] for s in self_states])
self_c = torch.stack([s[1] for s in self_states])
assert_allclose(self_h, torch_h)
assert_allclose(self_c, torch_c)
s = self_y.reshape(-1)
t = torch_y.reshape(-1)
s_sum = (s * torch.arange(s.numel(), device=device)).sum()
t_sum = (t * torch.arange(t.numel(), device=device)).sum()
shc_sum = s_sum + self_h.sum() + self_c.sum()
thc_sum = t_sum + torch_h.sum() + torch_c.sum()
shc_sum.backward()
thc_sum.backward()
assert_allclose(x.grad, x_clone.grad)
def test_layernorm_lstm_with_projection_forward(device="cpu"):
input_size = torch.randint(low=2, high=100, size=(1,)).item()
hidden_size = torch.randint(low=10, high=100, size=(1,)).item()
proj_size = torch.randint(low=2, high=hidden_size, size=(1,)).item()
num_layers = torch.randint(low=2, high=100, size=(1,)).item()
bias = torch.randint(low=0, high=1000, size=(1,)).item() & 2 == 0
self_lstm = LayerNormLSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
proj_size=proj_size,
ln=nn.Identity,
device=device,
)
torch_lstm = nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
proj_size=proj_size,
batch_first=True,
bidirectional=False,
).to(device)
assert len(self_lstm.state_dict()) == len(torch_lstm.state_dict())
with torch.no_grad():
for name, param in self_lstm.named_parameters():
# name has the form layers.0.cell.weight_hh
parts = name.split(".")
layer_num = parts[1]
getattr(torch_lstm, f"{parts[-1]}_l{layer_num}").copy_(param)
N = torch.randint(low=2, high=100, size=(1,))
T = torch.randint(low=2, high=100, size=(1,))
x = torch.rand(N, T, input_size, device=device).requires_grad_()
hs = [torch.rand(N, proj_size, device=device) for _ in range(num_layers)]
cs = [torch.rand(N, hidden_size, device=device) for _ in range(num_layers)]
states = list(zip(hs, cs))
x_clone = x.detach().clone().requires_grad_()
self_y, self_states = self_lstm(x, states)
h = torch.stack(hs)
c = torch.stack(cs)
torch_y, (torch_h, torch_c) = torch_lstm(x_clone, (h, c))
assert_allclose(self_y, torch_y)
self_h = torch.stack([s[0] for s in self_states])
self_c = torch.stack([s[1] for s in self_states])
assert_allclose(self_h, torch_h)
assert_allclose(self_c, torch_c)
s = self_y.reshape(-1)
t = torch_y.reshape(-1)
s_sum = (s * torch.arange(s.numel(), device=device)).sum()
t_sum = (t * torch.arange(t.numel(), device=device)).sum()
shc_sum = s_sum + self_h.sum() + self_c.sum()
thc_sum = t_sum + torch_h.sum() + torch_c.sum()
shc_sum.backward()
thc_sum.backward()
assert_allclose(x.grad, x_clone.grad)
def test_layernorm_gru_cell_jit(device="cpu"):
input_size = 10
hidden_size = 20
cell = LayerNormGRUCell(
input_size=input_size,
hidden_size=hidden_size,
bias=True,
device=device,
)
torch.jit.script(cell)
def test_layernorm_gru_cell_constructor(device="cpu"):
input_size = torch.randint(low=2, high=100, size=(1,)).item()
hidden_size = torch.randint(low=2, high=100, size=(1,)).item()
self_cell = LayerNormGRUCell(
input_size,
hidden_size,
ln=nn.Identity,
device=device,
)
torch_cell = nn.GRUCell(
input_size,
hidden_size,
).to(device)
for name, param in self_cell.named_parameters():
assert param.shape == getattr(torch_cell, name).shape
assert len(self_cell.state_dict()) == len(torch_cell.state_dict())
def test_layernorm_gru_cell_forward(device="cpu"):
input_size = torch.randint(low=2, high=100, size=(1,)).item()
hidden_size = torch.randint(low=2, high=100, size=(1,)).item()
bias = torch.randint(low=0, high=1000, size=(1,)).item() & 2 == 0
self_cell = LayerNormGRUCell(
input_size,
hidden_size,
bias=bias,
ln=nn.Identity,
device=device,
)
torch_cell = nn.GRUCell(
input_size,
hidden_size,
bias=bias,
).to(device)
with torch.no_grad():
for name, torch_param in torch_cell.named_parameters():
self_param = getattr(self_cell, name)
torch_param.copy_(self_param)
N = torch.randint(low=2, high=100, size=(1,))
x = torch.rand(N, input_size, device=device).requires_grad_()
h = torch.rand(N, hidden_size, device=device)
x_clone = x.detach().clone().requires_grad_()
self_h = self_cell(x.clone(), h)
torch_h = torch_cell(x_clone, h)
assert_allclose(self_h, torch_h, atol=1e-5)
(
self_h.reshape(-1) * torch.arange(self_h.numel(), device=device)
).sum().backward()
(
torch_h.reshape(-1) * torch.arange(torch_h.numel(), device=device)
).sum().backward()
assert_allclose(x.grad, x_clone.grad, atol=1e-3)
def test_layernorm_gru_layer_jit(device="cpu"):
input_size = 10
hidden_size = 20
layer = LayerNormGRULayer(
input_size,
hidden_size=hidden_size,
device=device,
)
torch.jit.script(layer)
def test_layernorm_gru_layer_forward(device="cpu"):
input_size = torch.randint(low=2, high=100, size=(1,)).item()
hidden_size = torch.randint(low=2, high=100, size=(1,)).item()
bias = torch.randint(low=0, high=1000, size=(1,)).item() & 2 == 0
self_layer = LayerNormGRULayer(
input_size,
hidden_size,
bias=bias,
ln=nn.Identity,
device=device,
)
N = torch.randint(low=2, high=100, size=(1,))
T = torch.randint(low=2, high=100, size=(1,))
x = torch.rand(N, T, input_size, device=device).requires_grad_()
h = torch.rand(N, hidden_size, device=device)
x_clone = x.detach().clone().requires_grad_()
self_y, self_h = self_layer(x, h.clone())
torch_layer = nn.GRU(
input_size=input_size,
hidden_size=hidden_size,
num_layers=1,
bias=bias,
batch_first=True,
dropout=0,
bidirectional=False,
).to(device)
with torch.no_grad():
for name, self_param in self_layer.cell.named_parameters():
getattr(torch_layer, f"{name}_l0").copy_(self_param)
torch_y, torch_h = torch_layer(x_clone, h.unsqueeze(0))
assert_allclose(self_y, torch_y)
assert_allclose(self_h, torch_h)
self_y_sum = (
self_y.reshape(-1) * torch.arange(self_y.numel(), device=device)
).sum()
torch_y_sum = (
torch_y.reshape(-1) * torch.arange(torch_y.numel(), device=device)
).sum()
self_y_sum.backward()
torch_y_sum.backward()
assert_allclose(x.grad, x_clone.grad, atol=0.1)
def test_layernorm_gru_jit(device="cpu"):
input_size = 2
hidden_size = 3
num_layers = 4
bias = True
gru = LayerNormGRU(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
ln=nn.Identity,
device=device,
)
torch.jit.script(gru)
def test_layernorm_gru_forward(device="cpu"):
input_size = torch.randint(low=2, high=100, size=(1,)).item()
hidden_size = torch.randint(low=2, high=100, size=(1,)).item()
num_layers = torch.randint(low=2, high=100, size=(1,)).item()
bias = torch.randint(low=0, high=1000, size=(1,)).item() & 2 == 0
self_gru = LayerNormGRU(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
ln=nn.Identity,
device=device,
)
torch_gru = nn.GRU(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=True,
bidirectional=False,
).to(device)
assert len(self_gru.state_dict()) == len(torch_gru.state_dict())
with torch.no_grad():
for name, param in self_gru.named_parameters():
# name has the form layers.0.cell.weight_hh
parts = name.split(".")
layer_num = parts[1]
getattr(torch_gru, f"{parts[-1]}_l{layer_num}").copy_(param)
N = torch.randint(low=2, high=100, size=(1,))
T = torch.randint(low=2, high=100, size=(1,))
x = torch.rand(N, T, input_size, device=device).requires_grad_()
states = [
torch.rand(N, hidden_size, device=device) for _ in range(num_layers)
]
x_clone = x.detach().clone().requires_grad_()
self_y, self_states = self_gru(x, states)
torch_y, torch_states = torch_gru(x_clone, torch.stack(states))
assert_allclose(self_y, torch_y)
self_states = torch.stack(self_states)
assert_allclose(self_states, torch_states)
s = self_y.reshape(-1)
t = torch_y.reshape(-1)
s_sum = (s * torch.arange(s.numel(), device=device)).sum()
t_sum = (t * torch.arange(t.numel(), device=device)).sum()
s_state_sum = s_sum + self_states.sum()
t_state_sum = t_sum + torch_states.sum()
s_state_sum.backward()
t_state_sum.backward()
assert_allclose(x.grad, x_clone.grad, atol=1e-2)
def _test_lstm(device):
test_layernorm_lstm_cell_jit(device)
test_layernorm_lstm_cell_constructor(device)
test_layernorm_lstm_cell_with_projection_jit(device)
test_layernorm_lstm_cell_forward(device)
test_layernorm_lstm_cell_with_projection_forward(device)
#
test_layernorm_lstm_layer_jit(device)
test_layernorm_lstm_layer_with_project_jit(device)
test_layernorm_lstm_layer_forward(device)
test_layernorm_lstm_layer_with_projection_forward(device)
test_layernorm_lstm_jit(device)
test_layernorm_lstm_with_projection_jit(device)
test_layernorm_lstm_forward(device)
test_layernorm_lstm_with_projection_forward(device)
def _test_gru(device):
test_layernorm_gru_cell_jit(device)
test_layernorm_gru_cell_constructor(device)
test_layernorm_gru_cell_forward(device)
#
test_layernorm_gru_layer_jit(device)
test_layernorm_gru_layer_forward(device)
#
test_layernorm_gru_jit(device)
test_layernorm_gru_forward(device)
torch.set_num_threads(1)
torch.set_num_interop_threads(1)
def main():
for device in get_devices():
print("device", device)
_test_lstm(device)
_test_gru(device)
if __name__ == "__main__":
torch.manual_seed(20211202)
main()
| #!/usr/bin/env python3
# Copyright 2021 Xiaomi Corp. (authors: <NAME>)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from transducer.rnn import (
LayerNormGRU,
LayerNormGRUCell,
LayerNormGRULayer,
LayerNormLSTM,
LayerNormLSTMCell,
LayerNormLSTMLayer,
)
def get_devices():
devices = [torch.device("cpu")]
if torch.cuda.is_available():
devices.append(torch.device("cuda", 0))
return devices
def assert_allclose(a: torch.Tensor, b: torch.Tensor, atol=1e-6, **kwargs):
assert torch.allclose(
a, b, atol=atol, **kwargs
), f"{(a - b).abs().max()}, {a.numel()}"
def test_layernorm_lstm_cell_jit(device="cpu"):
input_size = 10
hidden_size = 20
bias = torch.randint(low=0, high=1000, size=(1,)).item() & 2 == 0
cell = LayerNormLSTMCell(
input_size=input_size,
hidden_size=hidden_size,
bias=bias,
device=device,
)
torch.jit.script(cell)
def test_layernorm_lstm_cell_constructor(device="cpu"):
input_size = torch.randint(low=2, high=100, size=(1,)).item()
hidden_size = torch.randint(low=2, high=100, size=(1,)).item()
self_cell = LayerNormLSTMCell(
input_size,
hidden_size,
ln=nn.Identity,
device=device,
)
torch_cell = nn.LSTMCell(
input_size,
hidden_size,
).to(device)
for name, param in self_cell.named_parameters():
assert param.shape == getattr(torch_cell, name).shape
assert len(self_cell.state_dict()) == len(torch_cell.state_dict())
def test_layernorm_lstm_cell_with_projection_jit(device="cpu"):
input_size = 10
hidden_size = 20
proj_size = 5
self_cell = LayerNormLSTMCell(
input_size,
hidden_size,
proj_size=proj_size,
device=device,
)
torch.jit.script(self_cell)
def test_layernorm_lstm_cell_forward(device="cpu"):
input_size = torch.randint(low=2, high=100, size=(1,)).item()
hidden_size = torch.randint(low=2, high=100, size=(1,)).item()
bias = torch.randint(low=0, high=1000, size=(1,)).item() & 2 == 0
self_cell = LayerNormLSTMCell(
input_size,
hidden_size,
bias=bias,
ln=nn.Identity,
device=device,
)
torch_cell = nn.LSTMCell(
input_size,
hidden_size,
bias=bias,
).to(device)
with torch.no_grad():
for name, torch_param in torch_cell.named_parameters():
self_param = getattr(self_cell, name)
torch_param.copy_(self_param)
N = torch.randint(low=2, high=100, size=(1,))
x = torch.rand(N, input_size, device=device).requires_grad_()
h = torch.rand(N, hidden_size, device=device)
c = torch.rand(N, hidden_size, device=device)
x_clone = x.detach().clone().requires_grad_()
self_h, self_c = self_cell(x.clone(), (h, c))
torch_h, torch_c = torch_cell(x_clone, (h, c))
assert_allclose(self_h, torch_h)
assert_allclose(self_c, torch_c)
self_hc = self_h * self_c
torch_hc = torch_h * torch_c
(
self_hc.reshape(-1) * torch.arange(self_hc.numel(), device=device)
).sum().backward()
(
torch_hc.reshape(-1) * torch.arange(torch_hc.numel(), device=device)
).sum().backward()
assert_allclose(x.grad, x_clone.grad, atol=1e-3)
def test_layernorm_lstm_cell_with_projection_forward(device="cpu"):
input_size = torch.randint(low=2, high=100, size=(1,)).item()
hidden_size = torch.randint(low=10, high=100, size=(1,)).item()
bias = torch.randint(low=0, high=1000, size=(1,)).item() & 2 == 0
proj_size = torch.randint(low=2, high=hidden_size, size=(1,)).item()
self_cell = LayerNormLSTMCell(
input_size,
hidden_size,
bias=bias,
ln=nn.Identity,
proj_size=proj_size,
device=device,
)
torch_cell = nn.LSTM(
input_size,
hidden_size,
bias=bias,
proj_size=proj_size,
batch_first=True,
).to(device)
with torch.no_grad():
for name, self_param in self_cell.named_parameters():
getattr(torch_cell, f"{name}_l0").copy_(self_param)
N = torch.randint(low=2, high=100, size=(1,))
x = torch.rand(N, input_size, device=device).requires_grad_()
h = torch.rand(N, proj_size, device=device)
c = torch.rand(N, hidden_size, device=device)
x_clone = x.detach().clone().requires_grad_()
self_h, self_c = self_cell(x.clone(), (h, c))
_, (torch_h, torch_c) = torch_cell(
x_clone.unsqueeze(1), (h.unsqueeze(0), c.unsqueeze(0))
)
torch_h = torch_h.squeeze(0)
torch_c = torch_c.squeeze(0)
assert_allclose(self_h, torch_h)
assert_allclose(self_c, torch_c)
(self_h.sum() * self_c.sum()).backward()
(torch_h.sum() * torch_c.sum()).backward()
assert_allclose(x.grad, x_clone.grad, atol=1e-5)
def test_layernorm_lstm_layer_jit(device="cpu"):
input_size = 10
hidden_size = 20
layer = LayerNormLSTMLayer(
input_size,
hidden_size=hidden_size,
device=device,
)
torch.jit.script(layer)
def test_layernorm_lstm_layer_with_project_jit(device="cpu"):
input_size = 10
hidden_size = 20
proj_size = 5
layer = LayerNormLSTMLayer(
input_size,
hidden_size=hidden_size,
proj_size=proj_size,
device=device,
)
torch.jit.script(layer)
def test_layernorm_lstm_layer_with_projection_forward(device="cpu"):
input_size = torch.randint(low=2, high=100, size=(1,)).item()
hidden_size = torch.randint(low=10, high=100, size=(1,)).item()
bias = torch.randint(low=0, high=1000, size=(1,)).item() & 2 == 0
proj_size = torch.randint(low=2, high=hidden_size, size=(1,)).item()
self_layer = LayerNormLSTMLayer(
input_size,
hidden_size,
bias=bias,
proj_size=proj_size,
ln=nn.Identity,
device=device,
)
N = torch.randint(low=2, high=100, size=(1,))
T = torch.randint(low=2, high=100, size=(1,))
x = torch.rand(N, T, input_size, device=device).requires_grad_()
h = torch.rand(N, proj_size, device=device)
c = torch.rand(N, hidden_size, device=device)
x_clone = x.detach().clone().requires_grad_()
self_y, (self_h, self_c) = self_layer(x, (h, c))
torch_layer = nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=1,
bias=bias,
proj_size=proj_size,
batch_first=True,
dropout=0,
bidirectional=False,
).to(device)
with torch.no_grad():
for name, self_param in self_layer.cell.named_parameters():
getattr(torch_layer, f"{name}_l0").copy_(self_param)
torch_y, (torch_h, torch_c) = torch_layer(
x_clone, (h.unsqueeze(0), c.unsqueeze(0))
)
assert_allclose(self_y, torch_y)
assert_allclose(self_h, torch_h)
assert_allclose(self_c, torch_c)
self_y.sum().backward()
torch_y.sum().backward()
assert_allclose(x.grad, x_clone.grad, atol=1e-5)
def test_layernorm_lstm_layer_forward(device="cpu"):
input_size = torch.randint(low=2, high=100, size=(1,)).item()
hidden_size = torch.randint(low=2, high=100, size=(1,)).item()
bias = torch.randint(low=0, high=1000, size=(1,)).item() & 2 == 0
self_layer = LayerNormLSTMLayer(
input_size,
hidden_size,
bias=bias,
ln=nn.Identity,
device=device,
)
N = torch.randint(low=2, high=100, size=(1,))
T = torch.randint(low=2, high=100, size=(1,))
x = torch.rand(N, T, input_size, device=device).requires_grad_()
h = torch.rand(N, hidden_size, device=device)
c = torch.rand(N, hidden_size, device=device)
x_clone = x.detach().clone().requires_grad_()
self_y, (self_h, self_c) = self_layer(x, (h, c))
torch_layer = nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=1,
bias=bias,
batch_first=True,
dropout=0,
bidirectional=False,
).to(device)
with torch.no_grad():
for name, self_param in self_layer.cell.named_parameters():
getattr(torch_layer, f"{name}_l0").copy_(self_param)
torch_y, (torch_h, torch_c) = torch_layer(
x_clone, (h.unsqueeze(0), c.unsqueeze(0))
)
assert_allclose(self_y, torch_y)
assert_allclose(self_h, torch_h)
assert_allclose(self_c, torch_c)
self_hc = self_h * self_c
torch_hc = torch_h * torch_c
self_hc_sum = (
self_hc.reshape(-1) * torch.arange(self_hc.numel(), device=device)
).sum()
torch_hc_sum = (
torch_hc.reshape(-1) * torch.arange(torch_hc.numel(), device=device)
).sum()
self_y_sum = (
self_y.reshape(-1) * torch.arange(self_y.numel(), device=device)
).sum()
torch_y_sum = (
torch_y.reshape(-1) * torch.arange(torch_y.numel(), device=device)
).sum()
(self_hc_sum + self_y_sum).backward()
(torch_hc_sum + torch_y_sum).backward()
assert_allclose(x.grad, x_clone.grad, atol=0.1)
def test_layernorm_lstm_jit(device="cpu"):
input_size = 2
hidden_size = 3
num_layers = 4
bias = True
lstm = LayerNormLSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
ln=nn.Identity,
device=device,
)
torch.jit.script(lstm)
def test_layernorm_lstm_with_projection_jit(device="cpu"):
input_size = 2
hidden_size = 5
proj_size = 3
num_layers = 4
bias = True
lstm = LayerNormLSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
proj_size=proj_size,
ln=nn.Identity,
device=device,
)
torch.jit.script(lstm)
def test_layernorm_lstm_forward(device="cpu"):
input_size = torch.randint(low=2, high=100, size=(1,)).item()
hidden_size = torch.randint(low=2, high=100, size=(1,)).item()
num_layers = torch.randint(low=2, high=100, size=(1,)).item()
bias = torch.randint(low=0, high=1000, size=(1,)).item() & 2 == 0
self_lstm = LayerNormLSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
ln=nn.Identity,
device=device,
)
torch_lstm = nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=True,
bidirectional=False,
).to(device)
assert len(self_lstm.state_dict()) == len(torch_lstm.state_dict())
with torch.no_grad():
for name, param in self_lstm.named_parameters():
# name has the form layers.0.cell.weight_hh
parts = name.split(".")
layer_num = parts[1]
getattr(torch_lstm, f"{parts[-1]}_l{layer_num}").copy_(param)
N = torch.randint(low=2, high=100, size=(1,))
T = torch.randint(low=2, high=100, size=(1,))
x = torch.rand(N, T, input_size, device=device).requires_grad_()
hs = [torch.rand(N, hidden_size, device=device) for _ in range(num_layers)]
cs = [torch.rand(N, hidden_size, device=device) for _ in range(num_layers)]
states = list(zip(hs, cs))
x_clone = x.detach().clone().requires_grad_()
self_y, self_states = self_lstm(x, states)
h = torch.stack(hs)
c = torch.stack(cs)
torch_y, (torch_h, torch_c) = torch_lstm(x_clone, (h, c))
assert_allclose(self_y, torch_y)
self_h = torch.stack([s[0] for s in self_states])
self_c = torch.stack([s[1] for s in self_states])
assert_allclose(self_h, torch_h)
assert_allclose(self_c, torch_c)
s = self_y.reshape(-1)
t = torch_y.reshape(-1)
s_sum = (s * torch.arange(s.numel(), device=device)).sum()
t_sum = (t * torch.arange(t.numel(), device=device)).sum()
shc_sum = s_sum + self_h.sum() + self_c.sum()
thc_sum = t_sum + torch_h.sum() + torch_c.sum()
shc_sum.backward()
thc_sum.backward()
assert_allclose(x.grad, x_clone.grad)
def test_layernorm_lstm_with_projection_forward(device="cpu"):
input_size = torch.randint(low=2, high=100, size=(1,)).item()
hidden_size = torch.randint(low=10, high=100, size=(1,)).item()
proj_size = torch.randint(low=2, high=hidden_size, size=(1,)).item()
num_layers = torch.randint(low=2, high=100, size=(1,)).item()
bias = torch.randint(low=0, high=1000, size=(1,)).item() & 2 == 0
self_lstm = LayerNormLSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
proj_size=proj_size,
ln=nn.Identity,
device=device,
)
torch_lstm = nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
proj_size=proj_size,
batch_first=True,
bidirectional=False,
).to(device)
assert len(self_lstm.state_dict()) == len(torch_lstm.state_dict())
with torch.no_grad():
for name, param in self_lstm.named_parameters():
# name has the form layers.0.cell.weight_hh
parts = name.split(".")
layer_num = parts[1]
getattr(torch_lstm, f"{parts[-1]}_l{layer_num}").copy_(param)
N = torch.randint(low=2, high=100, size=(1,))
T = torch.randint(low=2, high=100, size=(1,))
x = torch.rand(N, T, input_size, device=device).requires_grad_()
hs = [torch.rand(N, proj_size, device=device) for _ in range(num_layers)]
cs = [torch.rand(N, hidden_size, device=device) for _ in range(num_layers)]
states = list(zip(hs, cs))
x_clone = x.detach().clone().requires_grad_()
self_y, self_states = self_lstm(x, states)
h = torch.stack(hs)
c = torch.stack(cs)
torch_y, (torch_h, torch_c) = torch_lstm(x_clone, (h, c))
assert_allclose(self_y, torch_y)
self_h = torch.stack([s[0] for s in self_states])
self_c = torch.stack([s[1] for s in self_states])
assert_allclose(self_h, torch_h)
assert_allclose(self_c, torch_c)
s = self_y.reshape(-1)
t = torch_y.reshape(-1)
s_sum = (s * torch.arange(s.numel(), device=device)).sum()
t_sum = (t * torch.arange(t.numel(), device=device)).sum()
shc_sum = s_sum + self_h.sum() + self_c.sum()
thc_sum = t_sum + torch_h.sum() + torch_c.sum()
shc_sum.backward()
thc_sum.backward()
assert_allclose(x.grad, x_clone.grad)
def test_layernorm_gru_cell_jit(device="cpu"):
input_size = 10
hidden_size = 20
cell = LayerNormGRUCell(
input_size=input_size,
hidden_size=hidden_size,
bias=True,
device=device,
)
torch.jit.script(cell)
def test_layernorm_gru_cell_constructor(device="cpu"):
input_size = torch.randint(low=2, high=100, size=(1,)).item()
hidden_size = torch.randint(low=2, high=100, size=(1,)).item()
self_cell = LayerNormGRUCell(
input_size,
hidden_size,
ln=nn.Identity,
device=device,
)
torch_cell = nn.GRUCell(
input_size,
hidden_size,
).to(device)
for name, param in self_cell.named_parameters():
assert param.shape == getattr(torch_cell, name).shape
assert len(self_cell.state_dict()) == len(torch_cell.state_dict())
def test_layernorm_gru_cell_forward(device="cpu"):
input_size = torch.randint(low=2, high=100, size=(1,)).item()
hidden_size = torch.randint(low=2, high=100, size=(1,)).item()
bias = torch.randint(low=0, high=1000, size=(1,)).item() & 2 == 0
self_cell = LayerNormGRUCell(
input_size,
hidden_size,
bias=bias,
ln=nn.Identity,
device=device,
)
torch_cell = nn.GRUCell(
input_size,
hidden_size,
bias=bias,
).to(device)
with torch.no_grad():
for name, torch_param in torch_cell.named_parameters():
self_param = getattr(self_cell, name)
torch_param.copy_(self_param)
N = torch.randint(low=2, high=100, size=(1,))
x = torch.rand(N, input_size, device=device).requires_grad_()
h = torch.rand(N, hidden_size, device=device)
x_clone = x.detach().clone().requires_grad_()
self_h = self_cell(x.clone(), h)
torch_h = torch_cell(x_clone, h)
assert_allclose(self_h, torch_h, atol=1e-5)
(
self_h.reshape(-1) * torch.arange(self_h.numel(), device=device)
).sum().backward()
(
torch_h.reshape(-1) * torch.arange(torch_h.numel(), device=device)
).sum().backward()
assert_allclose(x.grad, x_clone.grad, atol=1e-3)
def test_layernorm_gru_layer_jit(device="cpu"):
input_size = 10
hidden_size = 20
layer = LayerNormGRULayer(
input_size,
hidden_size=hidden_size,
device=device,
)
torch.jit.script(layer)
def test_layernorm_gru_layer_forward(device="cpu"):
input_size = torch.randint(low=2, high=100, size=(1,)).item()
hidden_size = torch.randint(low=2, high=100, size=(1,)).item()
bias = torch.randint(low=0, high=1000, size=(1,)).item() & 2 == 0
self_layer = LayerNormGRULayer(
input_size,
hidden_size,
bias=bias,
ln=nn.Identity,
device=device,
)
N = torch.randint(low=2, high=100, size=(1,))
T = torch.randint(low=2, high=100, size=(1,))
x = torch.rand(N, T, input_size, device=device).requires_grad_()
h = torch.rand(N, hidden_size, device=device)
x_clone = x.detach().clone().requires_grad_()
self_y, self_h = self_layer(x, h.clone())
torch_layer = nn.GRU(
input_size=input_size,
hidden_size=hidden_size,
num_layers=1,
bias=bias,
batch_first=True,
dropout=0,
bidirectional=False,
).to(device)
with torch.no_grad():
for name, self_param in self_layer.cell.named_parameters():
getattr(torch_layer, f"{name}_l0").copy_(self_param)
torch_y, torch_h = torch_layer(x_clone, h.unsqueeze(0))
assert_allclose(self_y, torch_y)
assert_allclose(self_h, torch_h)
self_y_sum = (
self_y.reshape(-1) * torch.arange(self_y.numel(), device=device)
).sum()
torch_y_sum = (
torch_y.reshape(-1) * torch.arange(torch_y.numel(), device=device)
).sum()
self_y_sum.backward()
torch_y_sum.backward()
assert_allclose(x.grad, x_clone.grad, atol=0.1)
def test_layernorm_gru_jit(device="cpu"):
input_size = 2
hidden_size = 3
num_layers = 4
bias = True
gru = LayerNormGRU(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
ln=nn.Identity,
device=device,
)
torch.jit.script(gru)
def test_layernorm_gru_forward(device="cpu"):
input_size = torch.randint(low=2, high=100, size=(1,)).item()
hidden_size = torch.randint(low=2, high=100, size=(1,)).item()
num_layers = torch.randint(low=2, high=100, size=(1,)).item()
bias = torch.randint(low=0, high=1000, size=(1,)).item() & 2 == 0
self_gru = LayerNormGRU(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
ln=nn.Identity,
device=device,
)
torch_gru = nn.GRU(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=True,
bidirectional=False,
).to(device)
assert len(self_gru.state_dict()) == len(torch_gru.state_dict())
with torch.no_grad():
for name, param in self_gru.named_parameters():
# name has the form layers.0.cell.weight_hh
parts = name.split(".")
layer_num = parts[1]
getattr(torch_gru, f"{parts[-1]}_l{layer_num}").copy_(param)
N = torch.randint(low=2, high=100, size=(1,))
T = torch.randint(low=2, high=100, size=(1,))
x = torch.rand(N, T, input_size, device=device).requires_grad_()
states = [
torch.rand(N, hidden_size, device=device) for _ in range(num_layers)
]
x_clone = x.detach().clone().requires_grad_()
self_y, self_states = self_gru(x, states)
torch_y, torch_states = torch_gru(x_clone, torch.stack(states))
assert_allclose(self_y, torch_y)
self_states = torch.stack(self_states)
assert_allclose(self_states, torch_states)
s = self_y.reshape(-1)
t = torch_y.reshape(-1)
s_sum = (s * torch.arange(s.numel(), device=device)).sum()
t_sum = (t * torch.arange(t.numel(), device=device)).sum()
s_state_sum = s_sum + self_states.sum()
t_state_sum = t_sum + torch_states.sum()
s_state_sum.backward()
t_state_sum.backward()
assert_allclose(x.grad, x_clone.grad, atol=1e-2)
def _test_lstm(device):
test_layernorm_lstm_cell_jit(device)
test_layernorm_lstm_cell_constructor(device)
test_layernorm_lstm_cell_with_projection_jit(device)
test_layernorm_lstm_cell_forward(device)
test_layernorm_lstm_cell_with_projection_forward(device)
#
test_layernorm_lstm_layer_jit(device)
test_layernorm_lstm_layer_with_project_jit(device)
test_layernorm_lstm_layer_forward(device)
test_layernorm_lstm_layer_with_projection_forward(device)
test_layernorm_lstm_jit(device)
test_layernorm_lstm_with_projection_jit(device)
test_layernorm_lstm_forward(device)
test_layernorm_lstm_with_projection_forward(device)
def _test_gru(device):
test_layernorm_gru_cell_jit(device)
test_layernorm_gru_cell_constructor(device)
test_layernorm_gru_cell_forward(device)
#
test_layernorm_gru_layer_jit(device)
test_layernorm_gru_layer_forward(device)
#
test_layernorm_gru_jit(device)
test_layernorm_gru_forward(device)
torch.set_num_threads(1)
torch.set_num_interop_threads(1)
def main():
for device in get_devices():
print("device", device)
_test_lstm(device)
_test_gru(device)
if __name__ == "__main__":
torch.manual_seed(20211202)
main() | en | 0.843535 | #!/usr/bin/env python3 # Copyright 2021 Xiaomi Corp. (authors: <NAME>) # # See ../../../../LICENSE for clarification regarding multiple authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # name has the form layers.0.cell.weight_hh # name has the form layers.0.cell.weight_hh # name has the form layers.0.cell.weight_hh # # # | 2.148093 | 2 |
settings.py | SalinderSidhu/CHIP8 | 4 | 10419 | <filename>settings.py
import configparser
class Settings:
    '''The Settings class is a wrapper for configparser and its functions.
This class simplifies the tasks of loading, storing and manipulating
settings data.'''
def __init__(self, filename):
'''Create a new Settings object with a specific file name.'''
# Exceptions
self.__settingException = Exception(
'Cannot find specified setting data!')
# Settings variables
self.__filename = filename
self.__config = configparser.ConfigParser()
# Load settings from existing file (if one exists)
self.__isEmpty = len(self.__config.read(self.__filename)) == 0
def isEmpty(self):
        '''Return True if there is no settings data loaded, otherwise return
False.'''
return self.__isEmpty
def addNewSetting(self, category, settingDict):
'''Add a new setting with the specified category and data. Save the new
settings data to a file.'''
self.__config[category] = settingDict.copy()
self.__saveAllSettings()
self.__isEmpty = False
def getSetting(self, category, key):
'''Return a setting value from the specified category and setting
key.'''
try:
return self.__config.get(category, key)
except KeyError:
raise self.__settingException
def editSetting(self, category, key, value):
'''Change an existing setting with a specified category and setting key
to the value specified. Save the new settings data to a file.'''
try:
self.__config.set(category, key, str(value))
self.__saveAllSettings()
except KeyError:
raise self.__settingException
def __saveAllSettings(self):
'''Write the current settings data to a file.'''
with open(self.__filename, 'w') as configFile:
self.__config.write(configFile)
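# --- Usage sketch (illustrative; not part of the CHIP8 project) --------------
# Exercises the Settings API defined above. The file name, category and keys
# are example values only.
if __name__ == '__main__':
    settings = Settings('display.ini')
    if settings.isEmpty():
        settings.addNewSetting('Display', {'width': '640', 'height': '320'})
    print(settings.getSetting('Display', 'width'))  # -> '640'
    settings.editSetting('Display', 'height', 480)  # persisted to display.ini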
| <filename>settings.py
import configparser
class Settings:
    '''The Settings class is a wrapper for configparser and its functions.
This class simplifies the tasks of loading, storing and manipulating
settings data.'''
def __init__(self, filename):
'''Create a new Settings object with a specific file name.'''
# Exceptions
self.__settingException = Exception(
'Cannot find specified setting data!')
# Settings variables
self.__filename = filename
self.__config = configparser.ConfigParser()
# Load settings from existing file (if one exists)
self.__isEmpty = len(self.__config.read(self.__filename)) == 0
def isEmpty(self):
        '''Return True if there is no settings data loaded, otherwise return
False.'''
return self.__isEmpty
def addNewSetting(self, category, settingDict):
'''Add a new setting with the specified category and data. Save the new
settings data to a file.'''
self.__config[category] = settingDict.copy()
self.__saveAllSettings()
self.__isEmpty = False
def getSetting(self, category, key):
'''Return a setting value from the specified category and setting
key.'''
try:
return self.__config.get(category, key)
except KeyError:
raise self.__settingException
def editSetting(self, category, key, value):
'''Change an existing setting with a specified category and setting key
to the value specified. Save the new settings data to a file.'''
try:
self.__config.set(category, key, str(value))
self.__saveAllSettings()
except KeyError:
raise self.__settingException
def __saveAllSettings(self):
'''Write the current settings data to a file.'''
with open(self.__filename, 'w') as configFile:
self.__config.write(configFile)
| en | 0.679763 | The Settings class is a wrapper for configparser and it's functions. This class simplifies the tasks of loading, storing and manipulating settings data. Create a new Settings object with a specific file name. # Exceptions # Settings variables # Load settings from existing file (if one exists) Return True if there is not settings data loaded, otherwise return False. Add a new setting with the specified category and data. Save the new settings data to a file. Return a setting value from the specified category and setting key. Change an existing setting with a specified category and setting key to the value specified. Save the new settings data to a file. Write the current settings data to a file. | 3.773241 | 4 |
demisto_sdk/commands/common/hook_validations/release_notes.py | yalonso7/demisto-sdk | 0 | 10420 | <filename>demisto_sdk/commands/common/hook_validations/release_notes.py
from __future__ import print_function
import itertools
from demisto_sdk.commands.common.constants import VALIDATED_PACK_ITEM_TYPES
from demisto_sdk.commands.common.errors import Errors
from demisto_sdk.commands.common.hook_validations.base_validator import \
BaseValidator
from demisto_sdk.commands.common.tools import (get_latest_release_notes_text,
get_release_notes_file_path)
from demisto_sdk.commands.update_release_notes.update_rn import UpdateRN
class ReleaseNotesValidator(BaseValidator):
"""Release notes validator is designed to ensure the existence and correctness of the release notes in content repo.
Attributes:
file_path (str): the path to the file we are examining at the moment.
release_notes_path (str): the path to the changelog file of the examined file.
latest_release_notes (str): the text of the UNRELEASED section in the changelog file.
master_diff (str): the changes in the changelog file compared to origin/master.
"""
def __init__(self, file_path, modified_files=None, pack_name=None, added_files=None, ignored_errors=None,
print_as_warnings=False):
super().__init__(ignored_errors=ignored_errors, print_as_warnings=print_as_warnings)
self.file_path = file_path
self.modified_files = modified_files
self.added_files = added_files
self.pack_name = pack_name
self.release_notes_path = get_release_notes_file_path(self.file_path)
self.latest_release_notes = get_latest_release_notes_text(self.release_notes_path)
def are_release_notes_complete(self):
is_valid = True
modified_added_files = itertools.chain.from_iterable((self.added_files or [], self.modified_files or []))
if modified_added_files:
for file in modified_added_files:
if not any(permitted_type in file for permitted_type in VALIDATED_PACK_ITEM_TYPES):
continue
elif self.pack_name in file:
update_rn_util = UpdateRN(pack=self.pack_name, pack_files=set(), update_type=None,
added_files=set())
file_name, file_type = update_rn_util.identify_changed_file_type(file)
if file_name and file_type:
if (file_type not in self.latest_release_notes) or (file_name not in self.latest_release_notes):
entity_name = update_rn_util.get_display_name(file)
error_message, error_code = Errors.missing_release_notes_entry(file_type, self.pack_name,
entity_name)
if self.handle_error(error_message, error_code, self.file_path):
is_valid = False
return is_valid
def has_release_notes_been_filled_out(self):
release_notes_comments = self.latest_release_notes
if len(release_notes_comments) == 0:
error_message, error_code = Errors.release_notes_file_empty()
if self.handle_error(error_message, error_code, file_path=self.file_path):
return False
elif '%%UPDATE_RN%%' in release_notes_comments:
error_message, error_code = Errors.release_notes_not_finished()
if self.handle_error(error_message, error_code, file_path=self.file_path):
return False
return True
def is_file_valid(self):
"""Checks if given file is valid.
Return:
bool. True if file's release notes are valid, False otherwise.
"""
validations = [
self.has_release_notes_been_filled_out(),
self.are_release_notes_complete()
]
return all(validations)
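# --- Usage sketch (illustrative; not part of demisto-sdk itself) -------------
# Shows how the validator above might be driven. The pack name and file paths
# are hypothetical, and the helper is not called anywhere in this module.
def _example_validate_release_notes():
    validator = ReleaseNotesValidator(
        file_path='Packs/MyPack/ReleaseNotes/1_0_1.md',
        modified_files=['Packs/MyPack/Integrations/MyIntegration/MyIntegration.yml'],
        added_files=[],
        pack_name='MyPack',
    )
    return validator.is_file_valid()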
| <filename>demisto_sdk/commands/common/hook_validations/release_notes.py
from __future__ import print_function
import itertools
from demisto_sdk.commands.common.constants import VALIDATED_PACK_ITEM_TYPES
from demisto_sdk.commands.common.errors import Errors
from demisto_sdk.commands.common.hook_validations.base_validator import \
BaseValidator
from demisto_sdk.commands.common.tools import (get_latest_release_notes_text,
get_release_notes_file_path)
from demisto_sdk.commands.update_release_notes.update_rn import UpdateRN
class ReleaseNotesValidator(BaseValidator):
"""Release notes validator is designed to ensure the existence and correctness of the release notes in content repo.
Attributes:
file_path (str): the path to the file we are examining at the moment.
release_notes_path (str): the path to the changelog file of the examined file.
latest_release_notes (str): the text of the UNRELEASED section in the changelog file.
master_diff (str): the changes in the changelog file compared to origin/master.
"""
def __init__(self, file_path, modified_files=None, pack_name=None, added_files=None, ignored_errors=None,
print_as_warnings=False):
super().__init__(ignored_errors=ignored_errors, print_as_warnings=print_as_warnings)
self.file_path = file_path
self.modified_files = modified_files
self.added_files = added_files
self.pack_name = pack_name
self.release_notes_path = get_release_notes_file_path(self.file_path)
self.latest_release_notes = get_latest_release_notes_text(self.release_notes_path)
def are_release_notes_complete(self):
is_valid = True
modified_added_files = itertools.chain.from_iterable((self.added_files or [], self.modified_files or []))
if modified_added_files:
for file in modified_added_files:
if not any(permitted_type in file for permitted_type in VALIDATED_PACK_ITEM_TYPES):
continue
elif self.pack_name in file:
update_rn_util = UpdateRN(pack=self.pack_name, pack_files=set(), update_type=None,
added_files=set())
file_name, file_type = update_rn_util.identify_changed_file_type(file)
if file_name and file_type:
if (file_type not in self.latest_release_notes) or (file_name not in self.latest_release_notes):
entity_name = update_rn_util.get_display_name(file)
error_message, error_code = Errors.missing_release_notes_entry(file_type, self.pack_name,
entity_name)
if self.handle_error(error_message, error_code, self.file_path):
is_valid = False
return is_valid
def has_release_notes_been_filled_out(self):
release_notes_comments = self.latest_release_notes
if len(release_notes_comments) == 0:
error_message, error_code = Errors.release_notes_file_empty()
if self.handle_error(error_message, error_code, file_path=self.file_path):
return False
elif '%%UPDATE_RN%%' in release_notes_comments:
error_message, error_code = Errors.release_notes_not_finished()
if self.handle_error(error_message, error_code, file_path=self.file_path):
return False
return True
def is_file_valid(self):
"""Checks if given file is valid.
Return:
bool. True if file's release notes are valid, False otherwise.
"""
validations = [
self.has_release_notes_been_filled_out(),
self.are_release_notes_complete()
]
return all(validations)
| en | 0.803394 | Release notes validator is designed to ensure the existence and correctness of the release notes in content repo. Attributes: file_path (str): the path to the file we are examining at the moment. release_notes_path (str): the path to the changelog file of the examined file. latest_release_notes (str): the text of the UNRELEASED section in the changelog file. master_diff (str): the changes in the changelog file compared to origin/master. Checks if given file is valid. Return: bool. True if file's release notes are valid, False otherwise. | 2.0718 | 2 |
PyOpenGL/PyGame/ex06/src/mathematics.py | hoppfull/Legacy-Python | 0 | 10421 | import numpy as np
class ProjectionMatrix():
"""This matrix provides projection distortion.
Projection distortion is when things that are far away
appear smaller and things that are close appear bigger.
This works flawlessly so far. Takes in screen-size and
provides near- and far clipping. fov is field-of-view
and smaller values will make view zoom in. A value of 1
will provide a panorama image."""
def __init__(self, screen_size, zNear, zFar, fov):
if fov >= 1: # Limit to 0.99 or we get infinity error at 1.0. >1.0 will give strange result.
fov = 0.99999;
tanHalfFOV = np.tan(fov * np.pi / 2.0)
zRange = zNear - zFar;
self.projectionMatrix = np.array([
[ # Row 0:
screen_size[1] / (tanHalfFOV * screen_size[0]),
0,
0,
0
],
[ # Row 1:
0,
1.0 / tanHalfFOV,
0,
0
],
[ # Row 2:
0,
0,
(-zNear - zFar)/zRange,
2.0 * zFar * zNear / zRange
],
[ # Row 3:
0,
0,
1,
0
],
], dtype=np.float32)
def get(self):
return self.projectionMatrix
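# --- Usage sketch (illustrative; not part of the original module) ------------
# Builds a projection matrix for an example 800x600 window; the clip planes
# and fov value are example numbers only.
def _example_projection():
    projection = ProjectionMatrix(screen_size=(800, 600), zNear=0.1, zFar=100.0, fov=0.5)
    return projection.get()  # 4x4 numpy array, ready to be sent to a shader uniform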
class ViewMatrix():
"""This matrix transform a model as if it's percieved by a
camera with a target 'self.t' in global world coordinates
and a position 'self.p' in global world coordinates. Global
coordinates are x=right, y=forth and z=up."""
def __init__(self, position):
self.p = vec3(position.x, position.y, position.z)
# target coordinates:
self.t = vec3(0, 0, 0)
# tolerance value:
self.tolerance = 0.5
"""The tolerance value is for testing when view lies within bounds.
In case of 'self.orbitTarget()', it's for testing when view gets too
close to target z-axis. In case of 'self.approachTarget()', it's for
testing when view gets too close to target coordinates."""
# Sensitivity value:
self.alpha = 0.01
"""The sensitivity value is for tuning how sensitive 'self.orbitTarget()'
and 'self.approachTarget()' are to user input."""
# Initialize the rotationMatrix as the identity matrix:
self.rotationMatrix = np.matrix([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]
], dtype=np.float32)
def translate(self, dp):
self.p = self.p.add(dp)
def setPos(self, p):
self.p = vec3(p.x, p.y, p.z)
def lookAt(self, target=None, up=None):
"""This function focuses the view on a target.
        Tested and seems to work as it should."""
if target != None:
self.t = vec3(target.x, target.y, target.z)
f = self.t.sub(self.p).norm()
if up != None:
u = vec3(up.x, up.y, up.z).norm()
else:
u = vec3(0, 0, 1)
s = f.cross(u).norm() # f x u
u = s.cross(f) # s x f, automatically normalized
self.rotationMatrix = np.matrix([
[ s.x, s.y, s.z, 0],
[ u.x, u.y, u.z, 0],
[ f.x, f.y, f.z, 0],
[ 0, 0, 0, 1]], dtype=np.float32)
def approachTarget(self, amount):
"""This function approaches the view towards the target
when amount is positive and moves away from the target when
amount is negative. It will stay outside the self.tolerance
distance. When completely close to the target, view cannot
look up or down too much."""
if amount == 0:
# If amount is zero, do nothing.
return
if self.t.sub(self.p).mag()*(1 - amount) > 2.0*self.tolerance:
# If 'self.approachTarget()' will not take the view within twice the
# tolerance distance, approach the target by given amount:
self.p = self.p.add(self.t.sub(self.p).scale(amount))
def orbitTarget(self, axis):
if axis == (0, 0):
return # Do nothing
# Get target2camera-vector:
p = self.p.sub(self.t)
# Assign passed values to variables we can change if we have to:
axis_x = -axis[0]
if axis[1] > 0.30/self.alpha:
"""If axis[1] is bigger than 0.40 / self.alpha, we get strange results
becouse view can 'tunnel' over the boundary set when getting view is
getting close to target z-axis. Changing tolerance doen't change it a
whole lot so I'm setting a boundary value for axis[1] to +-0.30 / self.alpha which is
really really large as it is."""
axis_y = 0.3 / self.alpha
elif axis[1] < -0.30/self.alpha:
axis_y = -0.3 / self.alpha
else:
axis_y = axis[1]
if axis_y > 0 and p.z > 0:
"""Tests if user is trying to orbit the view up
and if the view is above the 'equator'. The second
test is to make sure the view doesn't get stuck
if it gets inside the tolerance bounds and can get back
out as long as it's trying to move away."""
if vec2(p.x, p.y).mag() < self.tolerance:
axis_y = 0
elif axis_y < 0 and p.z < 0:
"""Tests if user is trying to orbit the view down
and if the view is below the 'equator'. Same test
but for different case as the one above."""
if vec2(p.x, p.y).mag() < self.tolerance:
axis_y = 0
if axis_y == 0: #If the other axis is zero:
# Amount of rotation for target-cam x-axis: (longitude, west2east)
v = vec3(0, 0, 1) # v is up vector
rate = axis_x
elif axis_x == 0: #If the other axis is zero:
# Amount of rotation for target-cam y-axis: (latitude, south2north)
v = p.cross(vec3(0, 0, 1)).norm() # v is side vector
rate = axis_y
else: #If neither is zero
# u is up vector:
u = vec3(0, 0, axis_x)
# s is side vector:
s = p.cross(vec3(0, 0, 1)).norm().scale(axis_y)
# v is combined vector:
v = u.add(s).norm()
rate = abs(axis_x) + abs(axis_y)
sin = np.sin(self.alpha * rate)
cos = np.cos(self.alpha * rate)
rotateMatrix = np.matrix([
[ # Row 0:
( v.x*v.x*(1 - cos) + cos ),
( v.y*v.x*(1 - cos) - v.z*sin ),
( v.z*v.x*(1 - cos) + v.y*sin ),
0
],
[ # Row 1:
( v.x*v.y*(1 - cos) + v.z*sin ),
( v.y*v.y*(1 - cos) + cos ),
( v.z*v.y*(1 - cos) - v.x*sin ),
0
],
[ # Row 2:
( v.x*v.z*(1 - cos) - v.y*sin ),
( v.y*v.z*(1 - cos) + v.x*sin ),
( v.z*v.z*(1 - cos) + cos ),
0
],
[ # Row 3:
0,
0,
0,
1
],
], dtype=np.float32)
p = rotateMatrix.dot( np.array([p.x, p.y, p.z, 1.0]) ).getA()[0][0:3]
self.p = vec3(p[0], p[1], p[2]).add(self.t)
self.lookAt(self.t)
def get(self):
translationMatrix = np.matrix([
[1,0,0,-self.p.x],
[0,1,0,-self.p.y],
[0,0,1,-self.p.z],
[0,0,0,1]
], dtype=np.float32)
return (self.rotationMatrix*translationMatrix).getA()
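# --- Usage sketch (illustrative; not part of the original module) ------------
# Places a camera at an example position, aims it at the origin and nudges it
# around its target; every number below is illustrative.
def _example_view():
    view = ViewMatrix(position=vec3(0, -10, 3))
    view.lookAt(target=vec3(0, 0, 0))
    view.orbitTarget((5, 2))    # orbit a little sideways and upwards
    view.approachTarget(0.1)    # move 10% of the remaining distance closer
    return view.get()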
class ModelMatrix():
"""This matrix transform a model into world coordinates.
Heavily tested and should work properly. Could probably
be optimized further or even translated into cython for
performance."""
def __init__(self, position):
self.p = vec3(position.x, position.y, position.z)
self.s = vec3(1, 1, 1)
self.rotationMatrix = np.matrix([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]
], dtype=np.float32)
def translate(self, dp):
self.p = self.p.add(dp)
def rotate(self, turns, unit):
"""Heavily tested and should work! Requires 'GL_TRUE'
to be passed to the uniform on shader program to work."""
u = unit.norm()
sin = np.sin(turns * np.pi * 2)
cos = np.cos(turns * np.pi * 2)
self.rotationMatrix = self.rotationMatrix.dot(
np.matrix([
[ # Row 0:
( u.x*u.x*(1 - cos) + cos ),
( u.y*u.x*(1 - cos) - u.z*sin ),
( u.z*u.x*(1 - cos) + u.y*sin ),
0
],
[ # Row 1:
( u.x*u.y*(1 - cos) + u.z*sin ),
( u.y*u.y*(1 - cos) + cos ),
( u.z*u.y*(1 - cos) - u.x*sin ),
0
],
[ # Row 2:
( u.x*u.z*(1 - cos) - u.y*sin ),
( u.y*u.z*(1 - cos) + u.x*sin ),
( u.z*u.z*(1 - cos) + cos ),
0
],
[ # Row 3:
0,
0,
0,
1
],
], dtype=np.float32))
def scale(self, s):
self.s = vec3(s.x, s.y, s.z)
def lookAt(self, target, up=None):
"""Heavily tested and should work! Requires 'GL_TRUE'
to be passed to the uniform on shader program to work."""
# Get normalized vector pointing from model to target
f = target.sub(self.p).norm()
if up != None:
u = vec3(up.x, up.y, up.z).norm()
else:
u = vec3(0, 0, 1)
s = f.cross(u).norm() # f x u
# s must be normalized! Consider when f and u are not perpendicular!
u = s.cross(f) # s x f, automatically normalized
self.rotationMatrix = np.matrix([
[ s.x, f.x, u.x, 0],
[ s.y, f.y, u.y, 0],
[ s.z, f.z, u.z, 0],
[ 0, 0, 0, 1]], dtype=np.float32)
def get(self):
"""Heavily tested and should work! Requires 'GL_TRUE'
to be passed to the uniform on shader program to work."""
translationMatrix = np.matrix([
[1,0,0,self.p.x],
[0,1,0,self.p.y],
[0,0,1,self.p.z],
[0,0,0,1]
], dtype=np.float32)
scaleMatrix = np.matrix([
[self.s.x,0,0,0],
[0,self.s.y,0,0],
[0,0,self.s.z,0],
[0,0,0,1]
], dtype=np.float32)
return (translationMatrix*self.rotationMatrix*scaleMatrix).getA()
class quaternion():
def __init__(self, x, y, z, w):
self.x = float(x)
self.y = float(y)
self.z = float(z)
self.w = float(w)
def mag(self): # Get length of quaternion
        return np.sqrt(self.x*self.x + self.y*self.y + self.z*self.z + self.w*self.w)
def norm(self): # Normalize quaternion
return quaternion(
x= self.x / self.mag(),
y= self.y / self.mag(),
z= self.z / self.mag(),
w= self.w / self.mag())
def conjugate(self):
return quaternion(
x=-self.x,
y=-self.y,
z=-self.z,
w= self.w)
def xQ(self, q): # Multiply with quaternion
return quaternion(
x= self.x * q.w + self.w * q.x + self.y * q.z - self.z * q.y,
y= self.y * q.w + self.w * q.y + self.z * q.x - self.x * q.z,
z= self.z * q.w + self.w * q.z + self.x * q.y - self.y * q.x,
w= self.w * q.w - self.x * q.x - self.y * q.y - self.z * q.z)
def xV(self, v): # Multiply with vector
return quaternion(
x= self.w*v.x + self.y*v.z - self.z*v.y,
y= self.w*v.y + self.z*v.x - self.x*v.z,
z= self.w*v.z + self.x*v.y - self.y*v.x,
w=-self.x*v.x - self.y*v.y - self.z*v.z)
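# A small sketch (my own addition, based only on the helpers above) of how this
# quaternion class can rotate a vector: for a unit quaternion q, the rotated vector
# is the (x, y, z) part of q * v * conjugate(q).
def _demo_quaternion_rotation():
    half = np.pi / 4                       # half-angle of a 90-degree turn about the z axis
    q = quaternion(0, 0, np.sin(half), np.cos(half))
    v = vec3(1, 0, 0)
    r = q.xV(v).xQ(q.conjugate())          # q * v * conj(q)
    return vec3(r.x, r.y, r.z)             # approximately (0, 1, 0)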
class vec2():
def __init__(self, x, y):
self.x = float(x)
self.y = float(y)
def mag(self):
return np.sqrt(self.x*self.x + self.y*self.y)
def norm(self):
return vec2(
x= self.x / self.mag(),
y= self.y / self.mag())
class vec3():
def __init__(self, x, y, z):
self.x = float(x)
self.y = float(y)
self.z = float(z)
def cross(self, vector):
return vec3(
x= self.y*vector.z - self.z*vector.y,
y= self.z*vector.x - self.x*vector.z,
z= self.x*vector.y - self.y*vector.x)
def dot(self, vector):
return float( self.x*vector.x + self.y*vector.y + self.z*vector.z )
def mag(self):
return np.sqrt(self.x*self.x + self.y*self.y + self.z*self.z)
def norm(self):
return vec3(
x= self.x / self.mag(),
y= self.y / self.mag(),
z= self.z / self.mag())
def add(self, vector):
return vec3(
x= self.x + vector.x,
y= self.y + vector.y,
z= self.z + vector.z)
def sub(self, vector):
return vec3(
x= self.x - vector.x,
y= self.y - vector.y,
z= self.z - vector.z)
def scale(self, scalar):
return vec3(
self.x*scalar,
self.y*scalar,
self.z*scalar)
def rotate(self, angle, axis):
pass
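# Composition sketch (my own example, not part of the original module): the three
# matrices above are usually multiplied into a single model-view-projection matrix
# before being uploaded to the shader. Screen size, clip planes and fov are arbitrary.
def _demo_mvp(screen_size=(800, 600), zNear=0.1, zFar=100.0, fov=0.5):
    projection = ProjectionMatrix(screen_size, zNear, zFar, fov)
    view = ViewMatrix(vec3(10, 0, 3))
    view.lookAt(vec3(0, 0, 0))
    model = ModelMatrix(vec3(0, 0, 0))
    model.rotate(0.25, vec3(0, 0, 1))      # a quarter turn about the z axis
    return projection.get().dot(view.get()).dot(model.get())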
| en | 0.892765 | 3.3909 | 3 |
test_utils/mocks.py | radomd92/botjagwar | 7 | 10422 | from xml.dom import minidom
import pywikibot
from api.decorator import time_this
SiteMock = pywikibot.Site
class PageMock(pywikibot.Page):
def __init__(self, *args, **kwargs):
super(PageMock, self).__init__(*args, **kwargs)
self.filename = "test_data/test_pages_%s.xml" % self.site.lang
        with open(self.filename, 'r') as xml_file:
            self.parsed = minidom.parse(xml_file)
self.pages = self.parsed.getElementsByTagName('page')
def put(self, newtext, summary=None, watch=None, minor=True, botflag=None,
force=False, asynchronous=False, callback=None, **kwargs):
print(('Saving page [[%s]] through put' % self.title()))
def save(self, summary=None, watch=None, minor=True, botflag=None,
force=False, asynchronous=False, callback=None,
apply_cosmetic_changes=None, quiet=False, **kwargs):
print(('Saving page [[%s]] through save' % self.title()))
def _save(self, summary=None, watch=None, minor=True, botflag=None,
cc=None, quiet=False, **kwargs):
print(('Saving page [[%s]] through save' % self.title()))
@time_this('Page.get() method mock')
def get(self, force=False, get_redirect=False, sysop=False):
for page in self.pages:
xml_title = page.getElementsByTagName(
'title')[0].childNodes[0].nodeValue
if xml_title == self.title():
return page.getElementsByTagName(
'text')[0].childNodes[0].nodeValue
print(('No page %s found in "%s"' % (self.title(), self.filename)))
return ''
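# Usage sketch (my own addition; it assumes the test_data XML fixtures are present):
# PageMock stands in for a live pywikibot.Page inside tests, so the wikitext comes
# from the local XML dump and "saves" only print a message instead of touching the wiki.
def _demo_pagemock(title='gaon'):
    page = PageMock(SiteMock('en', 'wiktionary'), title)
    wikitext = page.get()                      # read from test_data/test_pages_en.xml
    page.put(wikitext, summary='no-op edit')   # only prints, never edits the wiki
    return wikitext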
if __name__ == "__main__":
    # Simple smoke test: fetch the wikitext of a page from the bundled XML fixture.
    p = PageMock(SiteMock('en', 'wiktionary'), 'gaon')
    e = p.get()
| none | 1 | 2.480304 | 2 |
dl_tensorflow/deepdream.py | jarvisqi/deep_learning | 32 | 10423 | import os
from functools import partial
from io import BytesIO
import numpy as np
import PIL.Image
import scipy.misc
import tensorflow as tf
graph = tf.Graph()
sess = tf.InteractiveSession(graph=graph)
model_fn = "./models/tensorflow_inception_graph.pb"
with tf.gfile.FastGFile(model_fn, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
t_input = tf.placeholder(tf.float32, name="input")
imagenet_mean = 117.0
t_preprocessed = tf.expand_dims(t_input-imagenet_mean, 0)
tf.import_graph_def(graph_def, {"input": t_preprocessed})
def load_inception():
graph = tf.Graph()
sess = tf.InteractiveSession(graph=graph)
model_fn = "./models/tensorflow_inception_graph.pb"
with tf.gfile.FastGFile(model_fn, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
    # t_input is the placeholder for the input image
    t_input = tf.placeholder(np.float32, name='input')
    imagenet_mean = 117.0
    # The input image must be preprocessed before it can be fed to the network:
    # expand_dims adds a batch dimension, turning [height, width, channel] into [1, height, width, channel],
    # and subtracting imagenet_mean removes the mean pixel value
    t_preprocessed = tf.expand_dims(t_input - imagenet_mean, 0)
    tf.import_graph_def(graph_def, {'input': t_preprocessed})
    # Collect all convolutional layers
    layers = [op.name for op in graph.get_operations() if op.type ==
              "Conv2D" and "import/" in op.name]
    # Print the number of convolutional layers
    print('Number of layers', len(layers))
    # In particular, print the shape of mixed4d_3x3_bottleneck_pre_relu
    name = 'mixed4d_3x3_bottleneck_pre_relu'
    print('shape of %s: %s' % (name, str(graph.get_tensor_by_name('import/' + name + ':0').get_shape())))
def savearray(img_array, img_name):
scipy.misc.toimage(img_array).save(img_name)
print('img saved: %s' % img_name)
def visstd(a, s=0.1):
return (a-a.mean())/max(a.std(), 1e-4)*s+0.5
def resize_ratio(img, ratio):
min = img.min()
max = img.max()
img = (img - min) / (max - min) * 255
img = np.float32(scipy.misc.imresize(img, ratio))
img = img / 255 * (max - min) + min
return img
def resize(img, hw):
min = img.min()
max = img.max()
img = (img - min) / (max - min) * 255
img = np.float32(scipy.misc.imresize(img, hw))
img = img / 255 * (max - min) + min
return img
def calc_grad_tiled(img, t_grad, tile_size=512):
sz = tile_size
h, w = img.shape[:2]
sx, sy = np.random.randint(sz, size=2)
    img_shift = np.roll(np.roll(img, sx, 1), sy, 0)  # shift the whole image by (sx, sy): along axis 1 (x) first, then axis 0 (y)
grad = np.zeros_like(img)
for y in range(0, max(h - sz // 2, sz), sz):
for x in range(0, max(w - sz // 2, sz), sz):
sub = img_shift[y:y + sz, x:x + sz]
g = sess.run(t_grad, {t_input: sub})
grad[y:y + sz, x:x + sz] = g
return np.roll(np.roll(grad, -sx, 1), -sy, 0)
k = np.float32([1, 4, 6, 4, 1])
k = np.outer(k, k)
k5x5 = k[:, :, None, None] / k.sum() * np.eye(3, dtype=np.float32)
# Merge a Laplacian pyramid back into the original image
def lap_merge(levels):
img = levels[0]
for hi in levels[1:]:
with tf.name_scope('merge'):
img = tf.nn.conv2d_transpose(img, k5x5 * 4, tf.shape(hi), [1, 2, 2, 1]) + hi
return img
# Normalize img to unit standard deviation.
def normalize_std(img, eps=1e-10):
with tf.name_scope('normalize'):
std = tf.sqrt(tf.reduce_mean(tf.square(img)))
return img / tf.maximum(std, eps)
# Laplacian-pyramid normalization
def lap_normalize(img, scale_n=4):
img = tf.expand_dims(img, 0)
tlevels = lap_split_n(img, scale_n)
    # Apply normalize_std to every pyramid level
tlevels = list(map(normalize_std, tlevels))
out = lap_merge(tlevels)
return out[0, :, :, :]
# Split an image into low-frequency and high-frequency components
def lap_split(img):
with tf.name_scope('split'):
        # One convolution pass acts as a smoothing, so lo is the low-frequency component
        lo = tf.nn.conv2d(img, k5x5, [1, 2, 2, 1], 'SAME')
        # Upscale the low-frequency part back to the original size (lo2); subtracting it
        # from the original image leaves the high-frequency component hi
lo2 = tf.nn.conv2d_transpose(lo, k5x5 * 4, tf.shape(img), [1, 2, 2, 1])
hi = img - lo2
return lo, hi
# Split the image img into an n-level Laplacian pyramid
def lap_split_n(img, n):
levels = []
for i in range(n):
        # lap_split separates the image into low- and high-frequency parts;
        # the high-frequency part is stored in levels,
        # and the low-frequency part is split again
img, hi = lap_split(img)
levels.append(hi)
levels.append(img)
return levels[::-1]
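# Sanity-check sketch (my own addition): splitting an image into a Laplacian pyramid
# and merging it back should reconstruct the input, because each hi level stores
# exactly what the corresponding lo level loses. The shape here is arbitrary.
def _check_pyramid_roundtrip(n=3):
    x = tf.ones((1, 64, 64, 3), dtype=tf.float32)
    recon = lap_merge(lap_split_n(x, n))
    return np.allclose(sess.run(recon), sess.run(x), atol=1e-4)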
def tffunc(*argtypes):
placeholders = list(map(tf.placeholder, argtypes))
def wrap(f):
out = f(*placeholders)
def wrapper(*args, **kw):
return out.eval(dict(zip(placeholders, args)), session=kw.get('session'))
return wrapper
return wrap
def render_deepdream(img0, iter_n=10, step=1.5, octave_n=4, octave_scale=1.4):
name = 'mixed4d_3x3_bottleneck_pre_relu'
channel = 139
t_obj = graph.get_tensor_by_name("import/%s:0" % name)
t_score = tf.reduce_mean(t_obj)
t_grad = tf.gradients(t_score, t_input)[0]
lap_n=4
    # Wrap lap_normalize into a regular (non-symbolic) function
lap_norm_func = tffunc(np.float32)(partial(lap_normalize, scale_n=lap_n))
img = img0
    # Decompose the image into a pyramid as well; here the high- and low-frequency
    # parts are extracted simply by resizing
octaves = []
for i in range(octave_n-1):
hw = img.shape[:2]
lo = resize(img, np.int32(np.float32(hw) / octave_scale))
hi = img - resize(lo, hw)
img = lo
octaves.append(hi)
    # Start from the low-frequency image, then repeatedly upscale it and add back the high frequencies
for octave in range(octave_n):
if octave > 0:
hi = octaves[-octave]
img = resize(img, hi.shape[:2]) + hi
for i in range(iter_n):
g = calc_grad_tiled(img, t_grad)
img += g * (step / (np.abs(g).mean() + 1e-7))
            # The only difference would be normalizing g with lap_norm_func:
# g = lap_norm_func(g)
# img += g * step
print('.', end=' ')
img = img.clip(0, 255)
savearray(img, './predict_img/deepdream.jpg')
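# The objective above maximizes the mean activation of the whole layer. A common
# variant (sketched here as an assumption, mirroring the otherwise unused `channel`
# variable) maximizes a single channel instead, which tends to produce more specific
# patterns.
def channel_gradient(layer_name='mixed4d_3x3_bottleneck_pre_relu', channel=139):
    t_obj = graph.get_tensor_by_name("import/%s:0" % layer_name)[:, :, :, channel]
    t_score = tf.reduce_mean(t_obj)
    return tf.gradients(t_score, t_input)[0]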
if __name__ == '__main__':
img0 = PIL.Image.open('./images/test.jpg')
img0 = np.float32(img0)
render_deepdream(img0)
| zh | 0.912128 | 2.454681 | 2 |
admin.py | BlueBlock/usage-reporter | 4 | 10424 | import calendar
import datetime
import logging
import os
import webapp2
import dbmodel
TESTING = os.environ.get('SERVER_SOFTWARE', '').startswith('Development')
class ResetHandler(webapp2.RequestHandler):
def get(self):
timestamp = calendar.timegm(datetime.datetime.utcnow().timetuple())
self.response.write('<html><body><form method="POST"><input type="text" value="' + str(
timestamp) + '" name="day"><input type="submit"></form></body></html>')
def post(self):
timestamp = int(self.request.get('day', None))
entry_day = datetime.datetime.utcfromtimestamp(timestamp).date()
logging.info('Processing day %s', entry_day)
starttimestamp = calendar.timegm((entry_day.year, entry_day.month, entry_day.day, 0, 0, 0))
endtimestamp = starttimestamp + 24 * 60 * 60
logging.info('starttimestamp, endtimestamp: (%s, %s)', starttimestamp, endtimestamp)
count = 0
for item in dbmodel.ReportItem.all().filter('counted', 0).filter('eventtype =', 'Information').filter(
'timestamp <', endtimestamp).filter('timestamp >=', starttimestamp).order('timestamp'):
item.counted = None
item.put()
count += 1
for item in dbmodel.ReportItem.all().filter('counted', 1).filter('eventtype =', 'Information').filter(
'timestamp <', endtimestamp).filter('timestamp >=', starttimestamp).order('timestamp'):
item.counted = None
item.put()
count += 1
logging.info('Reset for %s items', count)
for item in dbmodel.AggregateItem.all().filter('timestamp =', starttimestamp).filter('rangetype =', 'day'):
item.delete()
app = webapp2.WSGIApplication([
('/tasks/admin/reset', ResetHandler)
], debug=TESTING)
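# Helper sketch (my own addition) showing the day-boundary arithmetic used in
# ResetHandler.post: a UTC timestamp is mapped to the [start, end) timestamps of its
# calendar day.
def day_bounds(timestamp):
    entry_day = datetime.datetime.utcfromtimestamp(timestamp).date()
    start = calendar.timegm((entry_day.year, entry_day.month, entry_day.day, 0, 0, 0))
    return start, start + 24 * 60 * 60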
| none | 1 | 2.524477 | 3 |
napari/utils/colormaps/categorical_colormap_utils.py | Zac-HD/napari | 1 | 10425 | from dataclasses import dataclass
from itertools import cycle
from typing import Dict, Union
import numpy as np
from ...layers.utils.color_transformations import (
transform_color,
transform_color_cycle,
)
@dataclass(eq=False)
class ColorCycle:
"""A dataclass to hold a color cycle for the fallback_colors
in the CategoricalColormap
Attributes
----------
values : np.ndarray
The (Nx4) color array of all colors contained in the color cycle.
cycle : cycle
The cycle object that gives fallback colors.
"""
values: np.ndarray
cycle: cycle
@classmethod
def __get_validators__(cls):
yield cls.validate_type
@classmethod
def validate_type(cls, val):
# turn a generic dict into object
if isinstance(val, dict):
return _coerce_colorcycle_from_dict(val)
elif isinstance(val, ColorCycle):
return val
else:
return _coerce_colorcycle_from_colors(val)
def _json_encode(self):
return {'values': self.values.tolist()}
def __eq__(self, other):
if isinstance(other, ColorCycle):
eq = np.array_equal(self.values, other.values)
else:
eq = False
return eq
def _coerce_colorcycle_from_dict(
val: Dict[str, Union[str, list, np.ndarray, cycle]]
) -> ColorCycle:
# validate values
color_values = val.get('values')
if color_values is None:
raise ValueError('ColorCycle requires a values argument')
transformed_color_values = transform_color(color_values)
# validate cycle
color_cycle = val.get('cycle')
if color_cycle is None:
transformed_color_cycle = transform_color_cycle(
color_cycle=color_values,
elem_name='color_cycle',
default="white",
)[0]
else:
transformed_color_cycle = color_cycle
return ColorCycle(
values=transformed_color_values, cycle=transformed_color_cycle
)
def _coerce_colorcycle_from_colors(
val: Union[str, list, np.ndarray]
) -> ColorCycle:
if isinstance(val, str):
val = [val]
(
transformed_color_cycle,
transformed_color_values,
) = transform_color_cycle(
color_cycle=val,
elem_name='color_cycle',
default="white",
)
return ColorCycle(
values=transformed_color_values, cycle=transformed_color_cycle
)
def compare_colormap_dicts(cmap_1, cmap_2):
if len(cmap_1) != len(cmap_2):
return False
for k, v in cmap_1.items():
if k not in cmap_2:
return False
if not np.allclose(v, cmap_2[k]):
return False
return True
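# Usage sketch (my own example; it assumes napari's color transforms accept plain
# color names): a ColorCycle can be coerced either from a list of colors or from a
# dict, and compare_colormap_dicts treats numerically close colors as equal.
def _demo_color_cycle():
    cycle_a = ColorCycle.validate_type(['red', 'blue'])
    cycle_b = ColorCycle.validate_type({'values': ['red', 'blue']})
    cmap_1 = {'a': cycle_a.values[0], 'b': cycle_a.values[1]}
    cmap_2 = {'a': cycle_b.values[0], 'b': cycle_b.values[1]}
    return compare_colormap_dicts(cmap_1, cmap_2)   # expected: True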
| en | 0.524431 | 2.817931 | 3 |
src/ipywidgets_toggle_buttons/abc_toggle_buttons_with_hide.py | stas-prokopiev/ipywidgets_toggle_buttons | 0 | 10426 | """Abstract class for all toggle buttons"""
# Standard library imports
import logging
from collections import OrderedDict
# Third party imports
import ipywidgets
# Local imports
from .abc_toggle_buttons import BaseToggleButtons
from .layouts import DICT_LAYOUT_HBOX_ANY
LOGGER = logging.getLogger(__name__)
class BaseToggleButtonsWithHide(BaseToggleButtons):
"""Abstract class for all toggle buttons
Values are stored in self.widget_parent when displayed is self.widget
Which is updated in the moment when display() is launched
"""
def __init__(
self,
widget_parent,
options_visible=None,
options_hidden=None,
**kwargs
):
"""Initialize object"""
super().__init__(widget_parent, **kwargs)
# hidden attributes to setters
self._options_visible = []
self._options_hidden = []
self._bool_is_hidden_options_created = False
# Create scaffolds inside self.widgets
self._create_scaffold_for_widget()
self._dict_visible_button_by_option = OrderedDict()
self._dict_hidden_button_by_option = OrderedDict()
# Set options
self.options_visible = options_visible
self.options_hidden = options_hidden
self._update_buttons_for_new_options()
@property
def options_visible(self):
"""Getter for visible options used in widget"""
return self._options_visible
@options_visible.setter
def options_visible(self, new_value):
"""Setter for visible options in widget
Args:
new_value (list or tuple): New options to set for widgets
"""
if new_value is None:
new_value = []
if set(new_value) == set(self.options_visible):
return None
self._options_visible = new_value
self._create_buttons_for_visible_options()
# Update hidden options to delete which exists in new visible
# This will also update the whole widget
self.options_hidden = self._options_hidden
self.options = self._options_visible + self._options_hidden
self._update_widget_view()
@property
def options_hidden(self):
"""Getter for hidden options used in widget"""
return self._options_hidden
@options_hidden.setter
def options_hidden(self, new_value):
"""Setter for hidden options in widget
Args:
new_value (list or tuple): New options to set for widgets
"""
if new_value is None:
new_value = []
if set(new_value) == set(self.options_hidden):
return None
# Filter out from hidden options all options which exists in main
options_hidden_cleared = []
for str_option in new_value:
if str_option not in self.options_visible:
options_hidden_cleared.append(str_option)
self._options_hidden = options_hidden_cleared
self.options = self._options_visible + self._options_hidden
# self._create_buttons_for_hidden_options()
self._update_widget_view()
def turn_off_all_buttons(self):
"""Mark all buttons as not clicked"""
for str_option in self._dict_visible_button_by_option:
but = self._dict_visible_button_by_option[str_option]
but.button_style = ""
for str_option in self._dict_hidden_button_by_option:
but = self._dict_hidden_button_by_option[str_option]
but.button_style = ""
# Change style of selected hidden button
# self._widget_but_hidden_option_selected.description = "..."
# self._widget_but_hidden_option_selected.button_style = ""
def _update_buttons_for_new_options(self):
"""Update buttons if options were changed"""
self._create_buttons_for_visible_options()
self._bool_is_hidden_options_created = False
# self._create_buttons_for_hidden_options()
def _create_scaffold_for_widget(self):
"""Create scaffold of ipywidget Boxes for self"""
# Main buttons box
self._widget_hbox_main = ipywidgets.HBox()
self._widget_hbox_main.layout = ipywidgets.Layout(**DICT_LAYOUT_HBOX_ANY)
# self._widget_hbox_main.layout.flex_flow = "row wrap"
# Middle buttons box
self._widget_hbox_middle_buttons = ipywidgets.HBox()
self._widget_hbox_middle_buttons.layout = ipywidgets.Layout(**DICT_LAYOUT_HBOX_ANY)
self._create_middle_buttons()
# Hidden buttons box
self._widget_hbox_hidden = ipywidgets.HBox()
self._widget_hbox_hidden.layout = ipywidgets.Layout(**DICT_LAYOUT_HBOX_ANY)
# self._widget_hbox_hidden.layout.flex_flow = "row wrap"
def _create_buttons_for_visible_options(self):
"""Create buttons for all visible options"""
self._dict_visible_button_by_option = OrderedDict()
int_button_width = self._get_button_width(self.options_visible)
list_buttons = []
for str_option in list(self.options_visible):
but_wid = ipywidgets.Button(
description=str_option,
layout={"width": "%dpx" % int_button_width}
)
but_wid.on_click(self._on_click_button_to_choose_option)
self._dict_visible_button_by_option[str_option] = but_wid
list_buttons.append(but_wid)
self._widget_hbox_main.children = list_buttons
def _create_middle_buttons(self):
"""Create buttons which are in charge what to do with hidden buttons"""
self._wid_but_hide_show = ipywidgets.ToggleButton(
value=False,
description="Show Hidden options",
button_style="info",
)
self._wid_but_hide_show.layout.width = "40%"
self._wid_but_hide_show.observe(
lambda _: self._update_widget_view(), "value")
self._widget_but_hidden_option_selected = ipywidgets.Button(
description="...", disabled=True)
self._widget_but_hidden_option_selected.layout.width = "40%"
self._widget_hbox_middle_buttons.children = [
self._widget_but_hidden_option_selected, self._wid_but_hide_show]
def _create_buttons_for_hidden_options(self):
"""Create buttons for all hidden options"""
self._dict_hidden_button_by_option = OrderedDict()
int_button_width = self._get_button_width(self.options_hidden)
list_buttons = []
for str_option in list(self.options_hidden):
but_wid = ipywidgets.Button(
description=str_option,
layout={"width": "%dpx" % int_button_width}
)
if str_option in self.value:
but_wid.button_style = "success"
but_wid.on_click(self._on_click_button_to_choose_option)
self._dict_hidden_button_by_option[str_option] = but_wid
list_buttons.append(but_wid)
self._widget_hbox_hidden.children = list_buttons
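# Illustration (my own sketch; the abstract class itself needs a concrete subclass and
# a widget parent to run): the options_hidden setter above de-duplicates against
# options_visible, so an option only ever lives in one of the two groups. The same
# rule written standalone:
def _dedupe_hidden(options_visible, options_hidden):
    return [opt for opt in options_hidden if opt not in options_visible]
# e.g. _dedupe_hidden(["a", "b"], ["b", "c"]) returns ["c"]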
| """Abstract class for all toggle buttons"""
# Standard library imports
import logging
from collections import OrderedDict
# Third party imports
import ipywidgets
# Local imports
from .abc_toggle_buttons import BaseToggleButtons
from .layouts import DICT_LAYOUT_HBOX_ANY
LOGGER = logging.getLogger(__name__)
class BaseToggleButtonsWithHide(BaseToggleButtons):
"""Abstract class for all toggle buttons
Values are stored in self.widget_parent when displayed is self.widget
Which is updated in the moment when display() is launched
"""
def __init__(
self,
widget_parent,
options_visible=None,
options_hidden=None,
**kwargs
):
"""Initialize object"""
super().__init__(widget_parent, **kwargs)
# hidden attributes to setters
self._options_visible = []
self._options_hidden = []
self._bool_is_hidden_options_created = False
# Create scaffolds inside self.widgets
self._create_scaffold_for_widget()
self._dict_visible_button_by_option = OrderedDict()
self._dict_hidden_button_by_option = OrderedDict()
# Set options
self.options_visible = options_visible
self.options_hidden = options_hidden
self._update_buttons_for_new_options()
@property
def options_visible(self):
"""Getter for visible options used in widget"""
return self._options_visible
@options_visible.setter
def options_visible(self, new_value):
"""Setter for visible options in widget
Args:
new_value (list or tuple): New options to set for widgets
"""
if new_value is None:
new_value = []
if set(new_value) == set(self.options_visible):
return None
self._options_visible = new_value
self._create_buttons_for_visible_options()
# Update hidden options to delete which exists in new visible
# This will also update the whole widget
self.options_hidden = self._options_hidden
self.options = self._options_visible + self._options_hidden
self._update_widget_view()
@property
def options_hidden(self):
"""Getter for hidden options used in widget"""
return self._options_hidden
@options_hidden.setter
def options_hidden(self, new_value):
"""Setter for hidden options in widget
Args:
new_value (list or tuple): New options to set for widgets
"""
if new_value is None:
new_value = []
if set(new_value) == set(self.options_hidden):
return None
# Filter out from hidden options all options which exists in main
options_hidden_cleared = []
for str_option in new_value:
if str_option not in self.options_visible:
options_hidden_cleared.append(str_option)
self._options_hidden = options_hidden_cleared
self.options = self._options_visible + self._options_hidden
# self._create_buttons_for_hidden_options()
self._update_widget_view()
def turn_off_all_buttons(self):
"""Mark all buttons as not clicked"""
for str_option in self._dict_visible_button_by_option:
but = self._dict_visible_button_by_option[str_option]
but.button_style = ""
for str_option in self._dict_hidden_button_by_option:
but = self._dict_hidden_button_by_option[str_option]
but.button_style = ""
# Change style of selected hidden button
# self._widget_but_hidden_option_selected.description = "..."
# self._widget_but_hidden_option_selected.button_style = ""
def _update_buttons_for_new_options(self):
"""Update buttons if options were changed"""
self._create_buttons_for_visible_options()
self._bool_is_hidden_options_created = False
# self._create_buttons_for_hidden_options()
def _create_scaffold_for_widget(self):
"""Create scaffold of ipywidget Boxes for self"""
# Main buttons box
self._widget_hbox_main = ipywidgets.HBox()
self._widget_hbox_main.layout = ipywidgets.Layout(**DICT_LAYOUT_HBOX_ANY)
# self._widget_hbox_main.layout.flex_flow = "row wrap"
# Middle buttons box
self._widget_hbox_middle_buttons = ipywidgets.HBox()
self._widget_hbox_middle_buttons.layout = ipywidgets.Layout(**DICT_LAYOUT_HBOX_ANY)
self._create_middle_buttons()
# Hidden buttons box
self._widget_hbox_hidden = ipywidgets.HBox()
self._widget_hbox_hidden.layout = ipywidgets.Layout(**DICT_LAYOUT_HBOX_ANY)
# self._widget_hbox_hidden.layout.flex_flow = "row wrap"
def _create_buttons_for_visible_options(self):
"""Create buttons for all visible options"""
self._dict_visible_button_by_option = OrderedDict()
int_button_width = self._get_button_width(self.options_visible)
list_buttons = []
for str_option in list(self.options_visible):
but_wid = ipywidgets.Button(
description=str_option,
layout={"width": "%dpx" % int_button_width}
)
but_wid.on_click(self._on_click_button_to_choose_option)
self._dict_visible_button_by_option[str_option] = but_wid
list_buttons.append(but_wid)
self._widget_hbox_main.children = list_buttons
def _create_middle_buttons(self):
"""Create buttons which are in charge what to do with hidden buttons"""
self._wid_but_hide_show = ipywidgets.ToggleButton(
value=False,
description="Show Hidden options",
button_style="info",
)
self._wid_but_hide_show.layout.width = "40%"
self._wid_but_hide_show.observe(
lambda _: self._update_widget_view(), "value")
self._widget_but_hidden_option_selected = ipywidgets.Button(
description="...", disabled=True)
self._widget_but_hidden_option_selected.layout.width = "40%"
self._widget_hbox_middle_buttons.children = [
self._widget_but_hidden_option_selected, self._wid_but_hide_show]
def _create_buttons_for_hidden_options(self):
"""Create buttons for all hidden options"""
self._dict_hidden_button_by_option = OrderedDict()
int_button_width = self._get_button_width(self.options_hidden)
list_buttons = []
for str_option in list(self.options_hidden):
but_wid = ipywidgets.Button(
description=str_option,
layout={"width": "%dpx" % int_button_width}
)
if str_option in self.value:
but_wid.button_style = "success"
but_wid.on_click(self._on_click_button_to_choose_option)
self._dict_hidden_button_by_option[str_option] = but_wid
list_buttons.append(but_wid)
self._widget_hbox_hidden.children = list_buttons
| en | 0.618118 | Abstract class for all toggle buttons # Standard library imports # Third party imports # Local imports Abstract class for all toggle buttons Values are stored in self.widget_parent when displayed is self.widget Which is updated in the moment when display() is launched Initialize object # hidden attributes to setters # Create scaffolds inside self.widgets # Set options Getter for visible options used in widget Setter for visible options in widget Args: new_value (list or tuple): New options to set for widgets # Update hidden options to delete which exists in new visible # This will also update the whole widget Getter for hidden options used in widget Setter for hidden options in widget Args: new_value (list or tuple): New options to set for widgets # Filter out from hidden options all options which exists in main # self._create_buttons_for_hidden_options() Mark all buttons as not clicked # Change style of selected hidden button # self._widget_but_hidden_option_selected.description = "..." # self._widget_but_hidden_option_selected.button_style = "" Update buttons if options were changed # self._create_buttons_for_hidden_options() Create scaffold of ipywidget Boxes for self # Main buttons box # self._widget_hbox_main.layout.flex_flow = "row wrap" # Middle buttons box # Hidden buttons box # self._widget_hbox_hidden.layout.flex_flow = "row wrap" Create buttons for all visible options Create buttons which are in charge what to do with hidden buttons Create buttons for all hidden options | 2.608826 | 3 |
Players/DWPMPlayer.py | jokvedaras/game-framework | 0 | 10427 | __author__ = '<NAME> and <NAME>'
import Player
import Message
# input
#0 for rock
#1 for paper
#2 for scissors
# past move is array of numbers
# our move followed by their move
#Our strategy is to look at all past moves.
#In a large number of games, you would expect
# each move to be seen a roughly equal number of times,
#so our strategy is to take the least seen move,
# expect it to show up soon,
# and play the move that beats it.
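# Worked example (added for clarity): with past_moves = [0, 0, 1] -- two rocks, one
# paper, no scissors -- scissors is the least seen move, so we expect it soon and
# play the move that beats it: RpsPlayingStrategy.play([0, 0, 1]) returns
# (2 + 1) % 3 == 0, i.e. rock, which beats scissors.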
class DWPMPlayer(Player.Player):
def __init__(self):
Player.Player.__init__(self)
self.past_moves = []
self.set_name("Dan and Pats Player")
def play(self):
return RpsPlayingStrategy.play(self.past_moves)
def add_past_move(self, move):
"""
adds opponents move to past moves
"""
self.past_moves.append(move)
def get_name(self):
return self.name
def notify(self, message):
# We use notifications to store opponent's moves in past rounds
# Process match-start and round-end messages
# At the start of the match, clear opponent moves history since a new match has started
# At the end of a round, append move to opponent's move history. Move history is used
# to compute the next move played.
if message.is_match_start_message():
players = message.get_players()
if players[0] == self or players[1] == self:
self.reset()
elif message.is_round_end_message():
players = message.get_players()
# Check if this message is for me and only then proceed
if (players[0] == self) or (players[1] == self):
# In this case, (by convention) the info is a tuple of the moves made and result
# e.g. ((1, 0), (1,0)) which
# means player 1 played paper (1), the player 2 played rock(0) and the result was that
# player 1 won (got 1 point) and player 2 lost (got 0 point)
moves, result = message.get_info()
# RPS is a two person game; figure out which of the players is me
# and which one is the opponent
if players[0] == self:
opponent = 1
else:
opponent = 0
# Update opponent's past moves history
self.add_past_move(moves[opponent])
def reset(self):
self.past_moves = []
def set_name(self, name):
self.name = name
class RpsPlayingStrategy(object):
@staticmethod
def play(past_moves):
"""
our player assumes that given a high number of games, all 3 different moves of opponent will be used
an equal number of times. Given a list of past_moves, we can counter an opponent's assumed move
"""
rock = 0
paper = 0
scissors = 0
for this_move in list(past_moves):
if this_move == 0:
rock += 1
elif this_move == 1:
paper += 1
elif this_move == 2:
scissors += 1
#determine which move has been used least
if (rock < paper) and (rock < scissors):
move = 0
elif paper < scissors:
move = 1
else:
move = 2
move = (move + 1) % 3
return move
# Test driver
# Run by typing "python3 RpsPlayerExample.py"
if __name__ == "__main__":
player = PatAndDansRPSPlayer()
opponent = PatAndDansRPSPlayer()
players = [opponent, player]
fakemoves = (1, 2)
fakeresult = (0, 1)
player.notify(Message.Message.get_match_start_message(players))
player.notify(Message.Message.get_round_start_message(players))
move = player.play()
print ("Move played: ", move)
player.notify(Message.Message.get_round_end_message(players, fakemoves, fakeresult))
| en | 0.960794 | 3.570548 | 4 |
example/example.py | mowshon/age-and-gender | 81 | 10428 | <filename>example/example.py
from age_and_gender import *
from PIL import Image, ImageDraw, ImageFont
data = AgeAndGender()
data.load_shape_predictor('models/shape_predictor_5_face_landmarks.dat')
data.load_dnn_gender_classifier('models/dnn_gender_classifier_v1.dat')
data.load_dnn_age_predictor('models/dnn_age_predictor_v1.dat')
filename = 'test-image.jpg'
img = Image.open(filename).convert("RGB")
result = data.predict(img)
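# each entry in result is assumed to describe one detected face: a bounding box ('face') plus gender and age predictions with confidence values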
font = ImageFont.truetype("Acme-Regular.ttf", 20)
for info in result:
shape = [(info['face'][0], info['face'][1]), (info['face'][2], info['face'][3])]
draw = ImageDraw.Draw(img)
gender = info['gender']['value'].title()
gender_percent = int(info['gender']['confidence'])
age = info['age']['value']
age_percent = int(info['age']['confidence'])
draw.text(
(info['face'][0] - 10, info['face'][3] + 10), f"{gender} (~{gender_percent}%)\n{age} y.o. (~{age_percent}%).",
fill='white', font=font, align='center'
)
draw.rectangle(shape, outline="red", width=5)
img.show()
| <filename>example/example.py
from age_and_gender import *
from PIL import Image, ImageDraw, ImageFont
data = AgeAndGender()
data.load_shape_predictor('models/shape_predictor_5_face_landmarks.dat')
data.load_dnn_gender_classifier('models/dnn_gender_classifier_v1.dat')
data.load_dnn_age_predictor('models/dnn_age_predictor_v1.dat')
filename = 'test-image.jpg'
img = Image.open(filename).convert("RGB")
result = data.predict(img)
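# each entry in result is assumed to describe one detected face: a bounding box ('face') plus gender and age predictions with confidence values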
font = ImageFont.truetype("Acme-Regular.ttf", 20)
for info in result:
shape = [(info['face'][0], info['face'][1]), (info['face'][2], info['face'][3])]
draw = ImageDraw.Draw(img)
gender = info['gender']['value'].title()
gender_percent = int(info['gender']['confidence'])
age = info['age']['value']
age_percent = int(info['age']['confidence'])
draw.text(
(info['face'][0] - 10, info['face'][3] + 10), f"{gender} (~{gender_percent}%)\n{age} y.o. (~{age_percent}%).",
fill='white', font=font, align='center'
)
draw.rectangle(shape, outline="red", width=5)
img.show()
| none | 1 | 2.992659 | 3 |
|
code/generate_games.py | jppg/pygame-tictactoe | 0 | 10429 | from tictactoe import TicTacToe
import random
import csv
import os
gameNr = 1
gameLimit = 10000
lst_moves_1 = []
lst_moves_2 = []
while gameNr <= gameLimit:
print("+++++++++++")
print("Game#", gameNr)
game = TicTacToe()
tmp_moves_1 = []
tmp_moves_2 = []
while game.get_winner() == 0 and game.possible_moves() > 0:
pos = game.get_positions().copy()
while game.possible_moves() > 0:
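            # randint(0, 9) is inclusive on both ends; any pick game.play() rejects (assumed for out-of-range positions) is simply retried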
move = random.randint(0,9)
if game.play(int(move)):
if game.get_player() == 1:
tmp_moves_2.append([gameNr] + [game.get_turn() - 1] + pos + [move])
else:
tmp_moves_1.append([gameNr] + [game.get_turn() - 1] + pos + [move])
break
print("Winner of game ", gameNr, "is", game.get_winner())
if game.get_winner() == 1:
lst_moves_1.append(tmp_moves_1)
#lst_moves_1.append(tmp_moves_1[len(tmp_moves_1) - 1])
else:
#lst_moves_2.append(tmp_moves_2[len(tmp_moves_2) - 1])
lst_moves_2.append(tmp_moves_2)
#print("List X: ", lst_moves_1)
#print("List O: ", lst_moves_2)
game.print_board()
gameNr = gameNr + 1
with open('moves_1.csv', 'w', newline='') as f:
writer = csv.writer(f)
for row in lst_moves_1:
writer.writerows(row)
with open('moves_2.csv', 'w', newline='') as f:
writer = csv.writer(f)
for row in lst_moves_2:
writer.writerows(row) | from tictactoe import TicTacToe
import random
import csv
import os
gameNr = 1
gameLimit = 10000
lst_moves_1 = []
lst_moves_2 = []
while gameNr <= gameLimit:
print("+++++++++++")
print("Game#", gameNr)
game = TicTacToe()
tmp_moves_1 = []
tmp_moves_2 = []
while game.get_winner() == 0 and game.possible_moves() > 0:
pos = game.get_positions().copy()
while game.possible_moves() > 0:
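            # randint(0, 9) is inclusive on both ends; any pick game.play() rejects (assumed for out-of-range positions) is simply retried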
move = random.randint(0,9)
if game.play(int(move)):
if game.get_player() == 1:
tmp_moves_2.append([gameNr] + [game.get_turn() - 1] + pos + [move])
else:
tmp_moves_1.append([gameNr] + [game.get_turn() - 1] + pos + [move])
break
print("Winner of game ", gameNr, "is", game.get_winner())
if game.get_winner() == 1:
lst_moves_1.append(tmp_moves_1)
#lst_moves_1.append(tmp_moves_1[len(tmp_moves_1) - 1])
else:
#lst_moves_2.append(tmp_moves_2[len(tmp_moves_2) - 1])
lst_moves_2.append(tmp_moves_2)
#print("List X: ", lst_moves_1)
#print("List O: ", lst_moves_2)
game.print_board()
gameNr = gameNr + 1
with open('moves_1.csv', 'w', newline='') as f:
writer = csv.writer(f)
for row in lst_moves_1:
writer.writerows(row)
with open('moves_2.csv', 'w', newline='') as f:
writer = csv.writer(f)
for row in lst_moves_2:
writer.writerows(row) | en | 0.119504 | #", gameNr) #lst_moves_1.append(tmp_moves_1[len(tmp_moves_1) - 1]) #lst_moves_2.append(tmp_moves_2[len(tmp_moves_2) - 1]) #print("List X: ", lst_moves_1) #print("List O: ", lst_moves_2) | 3.411806 | 3 |
applications/CoSimulationApplication/custom_data_structure/pyKratos/IntervalUtility.py | lcirrott/Kratos | 2 | 10430 | from __future__ import print_function, absolute_import, division # makes these scripts backward compatible with python 2.6 and 2.7
# TODO this should be implemented, see "kratos/utilities/interval_utility.h"
class IntervalUtility(object):
def __init__(self, settings):
pass
def IsInInterval(self, current_time):
return True | from __future__ import print_function, absolute_import, division # makes these scripts backward compatible with python 2.6 and 2.7
# TODO this should be implemented, see "kratos/utilities/interval_utility.h"
class IntervalUtility(object):
def __init__(self, settings):
pass
def IsInInterval(self, current_time):
return True | en | 0.762129 | # makes these scripts backward compatible with python 2.6 and 2.7 # TODO this should be implemented, see "kratos/utilities/interval_utility.h" | 2.067049 | 2 |
stixcore/tmtc/tests/test_packets.py | nicHoch/STIXCore | 1 | 10431 |
import bitstring
import pytest
from stixcore.data.test import test_data
from stixcore.idb.manager import IDBManager
from stixcore.tmtc.packets import (
SOURCE_PACKET_HEADER_STRUCTURE,
TC_DATA_HEADER_STRUCTURE,
TM_DATA_HEADER_STRUCTURE,
SourcePacketHeader,
TCPacket,
TMDataHeader,
TMPacket,
)
from stixcore.tmtc.tm.tm_1 import TM_1_1
@pytest.fixture
def idb():
return IDBManager(test_data.idb.DIR).get_idb("2.26.34")
@pytest.mark.parametrize('class_header', [(SourcePacketHeader, SOURCE_PACKET_HEADER_STRUCTURE),
(TMDataHeader, TM_DATA_HEADER_STRUCTURE)])
def test_tmtc_headers(class_header):
cls, header = class_header
test_fmt = ', '.join(header.values())
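    # the number after ':' in each bitstring format string is the field width; use the all-ones value 2**width - 1 for every field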
test_values = {n: 2**int(v.split(':')[-1])-1 for n, v in header.items()}
test_binary = bitstring.pack(test_fmt, *test_values.values())
sph = cls(test_binary)
assert all([getattr(sph, key) == test_values[key]
for key in header.keys() if not key.startswith('spare')])
def test_tm_packet(idb):
combind_structures = {**SOURCE_PACKET_HEADER_STRUCTURE, **TM_DATA_HEADER_STRUCTURE}
test_fmt = ', '.join(combind_structures.values())
test_values = {n: 2 ** int(v.split(':')[-1]) - 1 for n, v in
combind_structures.items()}
test_binary = bitstring.pack(test_fmt, *test_values.values())
tmtc_packet = TMPacket(test_binary, idb=idb)
assert all([getattr(tmtc_packet.source_packet_header, key) == test_values[key]
for key in SOURCE_PACKET_HEADER_STRUCTURE.keys() if not key.startswith('spare')])
assert all([getattr(tmtc_packet.data_header, key) == test_values[key]
for key in TM_DATA_HEADER_STRUCTURE.keys() if not key.startswith('spare')])
def test_tc_packet():
combind_structures = {**SOURCE_PACKET_HEADER_STRUCTURE, **TC_DATA_HEADER_STRUCTURE}
test_fmt = ', '.join(combind_structures.values())
test_values = {n: 2 ** int(v.split(':')[-1]) - 1 for n, v in
combind_structures.items()}
test_values['process_id'] = 90
test_values['packet_category'] = 12
test_binary = bitstring.pack(test_fmt, *test_values.values())
tmtc_packet = TCPacket(test_binary)
assert all([getattr(tmtc_packet.source_packet_header, key) == test_values[key]
for key in SOURCE_PACKET_HEADER_STRUCTURE.keys() if not key.startswith('spare')])
assert all([getattr(tmtc_packet.data_header, key) == test_values[key]
for key in TC_DATA_HEADER_STRUCTURE.keys() if not key.startswith('spare')])
def test_tm_1_1(idb):
packet = TM_1_1('0x0da1c066000d100101782628a9c4e71e1dacc0a0', idb=idb)
assert packet.source_packet_header.process_id == 90
assert packet.source_packet_header.packet_category == 1
assert packet.data_header.service_type == 1
assert packet.data_header.service_subtype == 1
|
import bitstring
import pytest
from stixcore.data.test import test_data
from stixcore.idb.manager import IDBManager
from stixcore.tmtc.packets import (
SOURCE_PACKET_HEADER_STRUCTURE,
TC_DATA_HEADER_STRUCTURE,
TM_DATA_HEADER_STRUCTURE,
SourcePacketHeader,
TCPacket,
TMDataHeader,
TMPacket,
)
from stixcore.tmtc.tm.tm_1 import TM_1_1
@pytest.fixture
def idb():
return IDBManager(test_data.idb.DIR).get_idb("2.26.34")
@pytest.mark.parametrize('class_header', [(SourcePacketHeader, SOURCE_PACKET_HEADER_STRUCTURE),
(TMDataHeader, TM_DATA_HEADER_STRUCTURE)])
def test_tmtc_headers(class_header):
cls, header = class_header
test_fmt = ', '.join(header.values())
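    # the number after ':' in each bitstring format string is the field width; use the all-ones value 2**width - 1 for every field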
test_values = {n: 2**int(v.split(':')[-1])-1 for n, v in header.items()}
test_binary = bitstring.pack(test_fmt, *test_values.values())
sph = cls(test_binary)
assert all([getattr(sph, key) == test_values[key]
for key in header.keys() if not key.startswith('spare')])
def test_tm_packet(idb):
combind_structures = {**SOURCE_PACKET_HEADER_STRUCTURE, **TM_DATA_HEADER_STRUCTURE}
test_fmt = ', '.join(combind_structures.values())
test_values = {n: 2 ** int(v.split(':')[-1]) - 1 for n, v in
combind_structures.items()}
test_binary = bitstring.pack(test_fmt, *test_values.values())
tmtc_packet = TMPacket(test_binary, idb=idb)
assert all([getattr(tmtc_packet.source_packet_header, key) == test_values[key]
for key in SOURCE_PACKET_HEADER_STRUCTURE.keys() if not key.startswith('spare')])
assert all([getattr(tmtc_packet.data_header, key) == test_values[key]
for key in TM_DATA_HEADER_STRUCTURE.keys() if not key.startswith('spare')])
def test_tc_packet():
combind_structures = {**SOURCE_PACKET_HEADER_STRUCTURE, **TC_DATA_HEADER_STRUCTURE}
test_fmt = ', '.join(combind_structures.values())
test_values = {n: 2 ** int(v.split(':')[-1]) - 1 for n, v in
combind_structures.items()}
test_values['process_id'] = 90
test_values['packet_category'] = 12
test_binary = bitstring.pack(test_fmt, *test_values.values())
tmtc_packet = TCPacket(test_binary)
assert all([getattr(tmtc_packet.source_packet_header, key) == test_values[key]
for key in SOURCE_PACKET_HEADER_STRUCTURE.keys() if not key.startswith('spare')])
assert all([getattr(tmtc_packet.data_header, key) == test_values[key]
for key in TC_DATA_HEADER_STRUCTURE.keys() if not key.startswith('spare')])
def test_tm_1_1(idb):
packet = TM_1_1('0x0da1c066000d100101782628a9c4e71e1dacc0a0', idb=idb)
assert packet.source_packet_header.process_id == 90
assert packet.source_packet_header.packet_category == 1
assert packet.data_header.service_type == 1
assert packet.data_header.service_subtype == 1
| none | 1 | 2.110349 | 2 |
|
python/thunder/rdds/fileio/seriesloader.py | broxtronix/thunder | 0 | 10432 | <reponame>broxtronix/thunder<gh_stars>0
"""Provides SeriesLoader object and helpers, used to read Series data from disk or other filesystems.
"""
from collections import namedtuple
import json
from numpy import array, arange, frombuffer, load, ndarray, unravel_index, vstack
from numpy import dtype as dtypeFunc
from scipy.io import loadmat
from cStringIO import StringIO
import itertools
import struct
import urlparse
import math
from thunder.rdds.fileio.writers import getParallelWriterForPath
from thunder.rdds.keys import Dimensions
from thunder.rdds.fileio.readers import getFileReaderForPath, FileNotFoundError, appendExtensionToPathSpec
from thunder.rdds.imgblocks.blocks import SimpleBlocks
from thunder.rdds.series import Series
from thunder.utils.common import parseMemoryString, smallestFloatType
class SeriesLoader(object):
"""Loader object used to instantiate Series data stored in a variety of formats.
"""
def __init__(self, sparkContext, minPartitions=None):
"""Initialize a new SeriesLoader object.
Parameters
----------
sparkcontext: SparkContext
The pyspark SparkContext object used by the current Thunder environment.
minPartitions: int
minimum number of partitions to use when loading data. (Used by fromText, fromMatLocal, and fromNpyLocal)
"""
from thunder.utils.aws import AWSCredentials
self.sc = sparkContext
self.minPartitions = minPartitions
self.awsCredentialsOverride = AWSCredentials.fromContext(sparkContext)
def _checkOverwrite(self, outputDirPath):
from thunder.utils.common import raiseErrorIfPathExists
raiseErrorIfPathExists(outputDirPath, awsCredentialsOverride=self.awsCredentialsOverride)
def fromArrays(self, arrays, npartitions=None):
"""
Create a Series object from a sequence of 1d numpy arrays on the driver.
"""
# recast singleton
if isinstance(arrays, ndarray):
arrays = [arrays]
# check shape and dtype
shape = arrays[0].shape
dtype = arrays[0].dtype
for ary in arrays:
if not ary.shape == shape:
raise ValueError("Inconsistent array shapes: first array had shape %s, but other array has shape %s" %
(str(shape), str(ary.shape)))
if not ary.dtype == dtype:
raise ValueError("Inconsistent array dtypes: first array had dtype %s, but other array has dtype %s" %
(str(dtype), str(ary.dtype)))
# generate linear keys
keys = map(lambda k: (k,), xrange(0, len(arrays)))
return Series(self.sc.parallelize(zip(keys, arrays), npartitions), dtype=str(dtype))
def fromArraysAsImages(self, arrays):
"""Create a Series object from a sequence of numpy ndarrays resident in memory on the driver.
The arrays will be interpreted as though each represents a single time point - effectively the same
as if converting Images to a Series, with each array representing a volume image at a particular
point in time. Thus in the resulting Series, the value of the record with key (0,0,0) will be
array([arrays[0][0,0,0], arrays[1][0,0,0],... arrays[n][0,0,0]).
The dimensions of the resulting Series will be *opposite* that of the passed numpy array. Their dtype will not
be changed.
"""
# if passed a single array, cast it to a sequence of length 1
if isinstance(arrays, ndarray):
arrays = [arrays]
# check that shapes of passed arrays are consistent
shape = arrays[0].shape
dtype = arrays[0].dtype
for ary in arrays:
if not ary.shape == shape:
raise ValueError("Inconsistent array shapes: first array had shape %s, but other array has shape %s" %
(str(shape), str(ary.shape)))
if not ary.dtype == dtype:
raise ValueError("Inconsistent array dtypes: first array had dtype %s, but other array has dtype %s" %
(str(dtype), str(ary.dtype)))
# get indices so that fastest index changes first
shapeiters = (xrange(n) for n in shape)
keys = [idx[::-1] for idx in itertools.product(*shapeiters)]
values = vstack([ary.ravel() for ary in arrays]).T
dims = Dimensions.fromTuple(shape[::-1])
return Series(self.sc.parallelize(zip(keys, values), self.minPartitions), dims=dims, dtype=str(dtype))
@staticmethod
def __normalizeDatafilePattern(dataPath, ext):
dataPath = appendExtensionToPathSpec(dataPath, ext)
# we do need to prepend a scheme here, b/c otherwise the Hadoop based readers
# will adopt their default behavior and start looking on hdfs://.
parseResult = urlparse.urlparse(dataPath)
if parseResult.scheme:
# this appears to already be a fully-qualified URI
return dataPath
else:
# this looks like a local path spec
# check whether we look like an absolute or a relative path
import os
dirComponent, fileComponent = os.path.split(dataPath)
if not os.path.isabs(dirComponent):
# need to make relative local paths absolute; our file scheme parsing isn't all that it could be.
dirComponent = os.path.abspath(dirComponent)
dataPath = os.path.join(dirComponent, fileComponent)
return "file://" + dataPath
def fromText(self, dataPath, nkeys=None, ext="txt", dtype='float64'):
"""
Loads Series data from text files.
Parameters
----------
dataPath : string
Specifies the file or files to be loaded. dataPath may be either a URI (with scheme specified) or a path
on the local filesystem.
If a path is passed (determined by the absence of a scheme component when attempting to parse as a URI),
and it is not already a wildcard expression and does not end in <ext>, then it will be converted into a
wildcard pattern by appending '/*.ext'. This conversion can be avoided by passing a "file://" URI.
dtype: dtype or dtype specifier, default 'float64'
"""
dataPath = self.__normalizeDatafilePattern(dataPath, ext)
def parse(line, nkeys_):
vec = [float(x) for x in line.split(' ')]
ts = array(vec[nkeys_:], dtype=dtype)
keys = tuple(int(x) for x in vec[:nkeys_])
return keys, ts
lines = self.sc.textFile(dataPath, self.minPartitions)
data = lines.map(lambda x: parse(x, nkeys))
return Series(data, dtype=str(dtype))
    # keytype, valuetype here violate camelCasing convention for consistency with JSON conf file format
BinaryLoadParameters = namedtuple('BinaryLoadParameters', 'nkeys nvalues keytype valuetype')
BinaryLoadParameters.__new__.__defaults__ = (None, None, 'int16', 'int16')
def __loadParametersAndDefaults(self, dataPath, confFilename, nkeys, nvalues, keyType, valueType):
"""Collects parameters to use for binary series loading.
Priority order is as follows:
1. parameters specified as keyword arguments;
2. parameters specified in a conf.json file on the local filesystem;
3. default parameters
Returns
-------
BinaryLoadParameters instance
"""
params = self.loadConf(dataPath, confFilename=confFilename)
# filter dict to include only recognized field names:
for k in params.keys():
if k not in SeriesLoader.BinaryLoadParameters._fields:
del params[k]
keywordParams = {'nkeys': nkeys, 'nvalues': nvalues, 'keytype': keyType, 'valuetype': valueType}
for k, v in keywordParams.items():
if not v:
del keywordParams[k]
params.update(keywordParams)
return SeriesLoader.BinaryLoadParameters(**params)
@staticmethod
def __checkBinaryParametersAreSpecified(paramsObj):
"""Throws ValueError if any of the field values in the passed namedtuple instance evaluate to False.
Note this is okay only so long as zero is not a valid parameter value. Hmm.
"""
missing = []
for paramName, paramVal in paramsObj._asdict().iteritems():
if not paramVal:
missing.append(paramName)
if missing:
raise ValueError("Missing parameters to load binary series files - " +
"these must be given either as arguments or in a configuration file: " +
str(tuple(missing)))
def fromBinary(self, dataPath, ext='bin', confFilename='conf.json',
nkeys=None, nvalues=None, keyType=None, valueType=None,
newDtype='smallfloat', casting='safe', maxPartitionSize='32mb'):
"""
Load a Series object from a directory of binary files.
Parameters
----------
dataPath : string URI or local filesystem path
Specifies the directory or files to be loaded. May be formatted as a URI string with scheme (e.g. "file://",
"s3n://", or "gs://"). If no scheme is present, will be interpreted as a path on the local filesystem. This path
must be valid on all workers. Datafile may also refer to a single file, or to a range of files specified
by a glob-style expression using a single wildcard character '*'.
newDtype : dtype or dtype specifier or string 'smallfloat' or None, optional, default 'smallfloat'
Numpy dtype of output series data. Most methods expect Series data to be floating-point. Input data will be
cast to the requested `newdtype` if not None - see Data `astype()` method.
casting : 'no'|'equiv'|'safe'|'same_kind'|'unsafe', optional, default 'safe'
Casting method to pass on to numpy's `astype()` method; see numpy documentation for details.
maxPartitionSize : str, optional, default = '32mb'
Maximum size of partitions as Java-style memory, will indirectly control the number of partitions
"""
paramsObj = self.__loadParametersAndDefaults(dataPath, confFilename, nkeys, nvalues, keyType, valueType)
self.__checkBinaryParametersAreSpecified(paramsObj)
dataPath = self.__normalizeDatafilePattern(dataPath, ext)
keyDtype = dtypeFunc(paramsObj.keytype)
valDtype = dtypeFunc(paramsObj.valuetype)
keySize = paramsObj.nkeys * keyDtype.itemsize
recordSize = keySize + paramsObj.nvalues * valDtype.itemsize
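        # each fixed-length record is the packed keys (nkeys entries) followed immediately by the packed values (nvalues entries)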
from thunder.utils.common import parseMemoryString
if isinstance(maxPartitionSize, basestring):
size = parseMemoryString(maxPartitionSize)
else:
raise Exception("Invalid size specification")
hadoopConf = {'recordLength': str(recordSize), 'mapred.max.split.size': str(size)}
lines = self.sc.newAPIHadoopFile(dataPath, 'thunder.util.io.hadoop.FixedLengthBinaryInputFormat',
'org.apache.hadoop.io.LongWritable',
'org.apache.hadoop.io.BytesWritable',
conf=hadoopConf)
data = lines.map(lambda (_, v):
(tuple(int(x) for x in frombuffer(buffer(v, 0, keySize), dtype=keyDtype)),
frombuffer(buffer(v, keySize), dtype=valDtype)))
return Series(data, dtype=str(valDtype), index=arange(paramsObj.nvalues)).astype(newDtype, casting)
def _getSeriesBlocksFromStack(self, dataPath, dims, ext="stack", blockSize="150M", dtype='int16',
newDtype='smallfloat', casting='safe', startIdx=None, stopIdx=None, recursive=False):
"""Create an RDD of <string blocklabel, (int k-tuple indices, array of datatype values)>
Parameters
----------
dataPath: string URI or local filesystem path
Specifies the directory or files to be loaded. May be formatted as a URI string with scheme (e.g. "file://",
"s3n://" or "gs://"). If no scheme is present, will be interpreted as a path on the local filesystem. This path
must be valid on all workers. Datafile may also refer to a single file, or to a range of files specified
by a glob-style expression using a single wildcard character '*'.
dims: tuple of positive int
Dimensions of input image data, ordered with the fastest-changing dimension first.
dtype: dtype or dtype specifier, optional, default 'int16'
Numpy dtype of input stack data
newDtype: floating-point dtype or dtype specifier or string 'smallfloat' or None, optional, default 'smallfloat'
Numpy dtype of output series data. Series data must be floating-point. Input data will be cast to the
requested `newdtype` - see numpy `astype()` method.
casting: 'no'|'equiv'|'safe'|'same_kind'|'unsafe', optional, default 'safe'
Casting method to pass on to numpy's `astype()` method; see numpy documentation for details.
recursive: boolean, default False
If true, will recursively descend directories rooted at dataPath, loading all files in the tree that
have an extension matching 'ext'. Recursive loading is currently only implemented for local filesystems
(not s3).
Returns
---------
pair of (RDD, ntimepoints)
RDD: sequence of keys, values pairs
(call using flatMap)
RDD Key: tuple of int
            zero-based indices of position within original image volume
RDD Value: numpy array of datatype
series of values at position across loaded image volumes
ntimepoints: int
number of time points in returned series, determined from number of stack files found at dataPath
newDtype: string
string representation of numpy data type of returned blocks
"""
dataPath = self.__normalizeDatafilePattern(dataPath, ext)
blockSize = parseMemoryString(blockSize)
totalDim = reduce(lambda x_, y_: x_*y_, dims)
dtype = dtypeFunc(dtype)
if newDtype is None or newDtype == '':
newDtype = str(dtype)
elif newDtype == 'smallfloat':
newDtype = str(smallestFloatType(dtype))
else:
newDtype = str(newDtype)
reader = getFileReaderForPath(dataPath)(awsCredentialsOverride=self.awsCredentialsOverride)
filenames = reader.list(dataPath, startIdx=startIdx, stopIdx=stopIdx, recursive=recursive)
if not filenames:
raise IOError("No files found for path '%s'" % dataPath)
dataSize = totalDim * len(filenames) * dtype.itemsize
nblocks = max(dataSize / blockSize, 1) # integer division
if len(dims) >= 3:
# for 3D stacks, do calculations to ensure that
# different planes appear in distinct files
blocksPerPlane = max(nblocks / dims[-1], 1)
pixPerPlane = reduce(lambda x_, y_: x_*y_, dims[:-1]) # all but last dimension
# get the greatest number of blocks in a plane (up to as many as requested) that still divide the plane
# evenly. This will always be at least one.
kUpdated = [x for x in range(1, blocksPerPlane+1) if not pixPerPlane % x][-1]
nblocks = kUpdated * dims[-1]
blockSizePerStack = (totalDim / nblocks) * dtype.itemsize
else:
# otherwise just round to make contents divide into nearly even blocks
blockSizePerStack = int(math.ceil(totalDim / float(nblocks)))
nblocks = int(math.ceil(totalDim / float(blockSizePerStack)))
blockSizePerStack *= dtype.itemsize
fileSize = totalDim * dtype.itemsize
def readBlock(blockNum):
# copy size out from closure; will modify later:
blockSizePerStack_ = blockSizePerStack
# get start position for this block
position = blockNum * blockSizePerStack_
# adjust if at end of file
if (position + blockSizePerStack_) > fileSize:
blockSizePerStack_ = int(fileSize - position)
# loop over files, loading one block from each
bufs = []
for fname in filenames:
buf = reader.read(fname, startOffset=position, size=blockSizePerStack_)
bufs.append(frombuffer(buf, dtype=dtype))
buf = vstack(bufs).T # dimensions are now linindex x time (images)
del bufs
buf = buf.astype(newDtype, casting=casting, copy=False)
# append subscript keys based on dimensions
itemPosition = position / dtype.itemsize
itemBlocksize = blockSizePerStack_ / dtype.itemsize
linearIdx = arange(itemPosition, itemPosition + itemBlocksize) # zero-based
keys = zip(*map(tuple, unravel_index(linearIdx, dims, order='F')))
return zip(keys, buf)
# map over blocks
return (self.sc.parallelize(range(0, nblocks), nblocks).flatMap(lambda bn: readBlock(bn)),
len(filenames), newDtype)
@staticmethod
def __readMetadataFromFirstPageOfMultiTif(reader, filePath):
import thunder.rdds.fileio.multitif as multitif
# read first page of first file to get expected image size
tiffFP = reader.open(filePath)
tiffParser = multitif.TiffParser(tiffFP, debug=False)
tiffHeaders = multitif.TiffData()
tiffParser.parseFileHeader(destinationTiff=tiffHeaders)
firstIfd = tiffParser.parseNextImageFileDirectory(destinationTiff=tiffHeaders)
if not firstIfd.isLuminanceImage():
raise ValueError(("File %s does not appear to be a luminance " % filePath) +
"(greyscale or bilevel) TIF image, " +
"which are the only types currently supported")
# keep reading pages until we reach the end of the file, in order to get number of planes:
while tiffParser.parseNextImageFileDirectory(destinationTiff=tiffHeaders):
pass
# get dimensions
npages = len(tiffHeaders.ifds)
height = firstIfd.getImageHeight()
width = firstIfd.getImageWidth()
# get datatype
bitsPerSample = firstIfd.getBitsPerSample()
if not (bitsPerSample in (8, 16, 32, 64)):
raise ValueError("Only 8, 16, 32, or 64 bit per pixel TIF images are supported, got %d" % bitsPerSample)
sampleFormat = firstIfd.getSampleFormat()
if sampleFormat == multitif.SAMPLE_FORMAT_UINT:
dtStr = 'uint'
elif sampleFormat == multitif.SAMPLE_FORMAT_INT:
dtStr = 'int'
elif sampleFormat == multitif.SAMPLE_FORMAT_FLOAT:
dtStr = 'float'
else:
raise ValueError("Unknown TIF SampleFormat tag value %d, should be 1, 2, or 3 for uint, int, or float"
% sampleFormat)
dtype = dtStr+str(bitsPerSample)
return height, width, npages, dtype
def _getSeriesBlocksFromMultiTif(self, dataPath, ext="tif", blockSize="150M",
newDtype='smallfloat', casting='safe', startIdx=None, stopIdx=None,
recursive=False):
import thunder.rdds.fileio.multitif as multitif
import itertools
from PIL import Image
import io
dataPath = self.__normalizeDatafilePattern(dataPath, ext)
blockSize = parseMemoryString(blockSize)
reader = getFileReaderForPath(dataPath)(awsCredentialsOverride=self.awsCredentialsOverride)
filenames = reader.list(dataPath, startIdx=startIdx, stopIdx=stopIdx, recursive=recursive)
if not filenames:
raise IOError("No files found for path '%s'" % dataPath)
ntimepoints = len(filenames)
doMinimizeReads = dataPath.lower().startswith("s3") or dataPath.lower().startswith("gs")
# check PIL version to see whether it is actually pillow or indeed old PIL and choose
# conversion function appropriately. See ImagesLoader.fromMultipageTif and common.pil_to_array
# for more explanation.
isPillow = hasattr(Image, "PILLOW_VERSION")
if isPillow:
conversionFcn = array # use numpy's array() function
else:
from thunder.utils.common import pil_to_array
conversionFcn = pil_to_array # use our modified version of matplotlib's pil_to_array
height, width, npages, dtype = SeriesLoader.__readMetadataFromFirstPageOfMultiTif(reader, filenames[0])
if dtype.startswith('int'):
raise ValueError('Signed integer tiff images are not supported in SeriesLoader (shuffle=False);' +
' please try loading as Images (shuffle=True)')
pixelBytesize = dtypeFunc(dtype).itemsize
if newDtype is None or str(newDtype) == '':
newDtype = str(dtype)
elif newDtype == 'smallfloat':
newDtype = str(smallestFloatType(dtype))
else:
newDtype = str(newDtype)
        # initialize at one block per plane
bytesPerPlane = height * width * pixelBytesize * ntimepoints
bytesPerBlock = bytesPerPlane
blocksPerPlane = 1
# keep dividing while cutting our size in half still leaves us bigger than the requested size
# should end up no more than 2x blockSize.
while bytesPerBlock >= blockSize * 2:
bytesPerBlock /= 2
blocksPerPlane *= 2
blocklenPixels = max((height * width) / blocksPerPlane, 1) # integer division
while blocksPerPlane * blocklenPixels < height * width: # make sure we're reading the plane fully
blocksPerPlane += 1
# prevent bringing in self in closure:
awsCredentialsOverride = self.awsCredentialsOverride
# keys will be planeidx, blockidx:
keys = list(itertools.product(xrange(npages), xrange(blocksPerPlane)))
def readBlockFromTiff(planeIdxBlockIdx):
planeIdx, blockIdx = planeIdxBlockIdx
blocks = []
planeShape = None
blockStart = None
blockEnd = None
for fname in filenames:
reader_ = getFileReaderForPath(fname)(awsCredentialsOverride=awsCredentialsOverride)
fp = reader_.open(fname)
try:
if doMinimizeReads:
# use multitif module to generate a fake, in-memory
# one-page tif file. the advantage of this is that it
# cuts way down on the many small reads that PIL/pillow
# will make otherwise, which would be a problem for s3
# or Google Storage
tiffParser_ = multitif.TiffParser(fp, debug=False)
tiffFilebuffer = multitif.packSinglePage(tiffParser_, pageIdx=planeIdx)
byteBuf = io.BytesIO(tiffFilebuffer)
try:
pilImg = Image.open(byteBuf)
ary = conversionFcn(pilImg).T
finally:
byteBuf.close()
del tiffFilebuffer, tiffParser_, pilImg, byteBuf
else:
# read tif using PIL directly
pilImg = Image.open(fp)
pilImg.seek(planeIdx)
ary = conversionFcn(pilImg).T
del pilImg
if not planeShape:
planeShape = ary.shape[:]
blockStart = blockIdx * blocklenPixels
blockEnd = min(blockStart+blocklenPixels, planeShape[0]*planeShape[1])
blocks.append(ary.ravel(order='C')[blockStart:blockEnd])
del ary
finally:
fp.close()
buf = vstack(blocks).T # dimensions are now linindex x time (images)
del blocks
buf = buf.astype(newDtype, casting=casting, copy=False)
# append subscript keys based on dimensions
linearIdx = arange(blockStart, blockEnd) # zero-based
seriesKeys = zip(*map(tuple, unravel_index(linearIdx, planeShape, order='C')))
# add plane index to end of keys
if npages > 1:
seriesKeys = [tuple(list(keys_)[::-1]+[planeIdx]) for keys_ in seriesKeys]
else:
seriesKeys = [tuple(list(keys_)[::-1]) for keys_ in seriesKeys]
return zip(seriesKeys, buf)
# map over blocks
rdd = self.sc.parallelize(keys, len(keys)).flatMap(readBlockFromTiff)
if npages > 1:
dims = (npages, width, height)
else:
dims = (width, height)
metadata = (dims, ntimepoints, newDtype)
return rdd, metadata
def fromStack(self, dataPath, dims, ext="stack", blockSize="150M", dtype='int16',
newDtype='smallfloat', casting='safe', startIdx=None, stopIdx=None, recursive=False):
"""Load a Series object directly from binary image stack files.
Parameters
----------
dataPath: string
Path to data files or directory, specified as either a local filesystem path or in a URI-like format,
including scheme. A dataPath argument may include a single '*' wildcard character in the filename.
dims: tuple of positive int
Dimensions of input image data, ordered with the fastest-changing dimension first.
ext: string, optional, default "stack"
Extension required on data files to be loaded.
blockSize: string formatted as e.g. "64M", "512k", "2G", or positive int. optional, default "150M"
Requested size of Series partitions in bytes (or kilobytes, megabytes, gigabytes).
dtype: dtype or dtype specifier, optional, default 'int16'
Numpy dtype of input stack data
newDtype: dtype or dtype specifier or string 'smallfloat' or None, optional, default 'smallfloat'
Numpy dtype of output series data. Most methods expect Series data to be floating-point. Input data will be
cast to the requested `newdtype` if not None - see Data `astype()` method.
casting: 'no'|'equiv'|'safe'|'same_kind'|'unsafe', optional, default 'safe'
Casting method to pass on to numpy's `astype()` method; see numpy documentation for details.
startIdx, stopIdx: nonnegative int. optional.
Indices of the first and last-plus-one data file to load, relative to the sorted filenames matching
`dataPath` and `ext`. Interpreted according to python slice indexing conventions.
recursive: boolean, default False
If true, will recursively descend directories rooted at dataPath, loading all files in the tree that
have an extension matching 'ext'. Recursive loading is currently only implemented for local filesystems
(not s3).
"""
seriesBlocks, npointsInSeries, newDtype = \
self._getSeriesBlocksFromStack(dataPath, dims, ext=ext, blockSize=blockSize, dtype=dtype,
newDtype=newDtype, casting=casting, startIdx=startIdx, stopIdx=stopIdx,
recursive=recursive)
return Series(seriesBlocks, dims=dims, dtype=newDtype, index=arange(npointsInSeries))
def fromTif(self, dataPath, ext="tif", blockSize="150M", newDtype='smallfloat', casting='safe',
startIdx=None, stopIdx=None, recursive=False):
"""Load a Series object from multipage tiff files.
Parameters
----------
dataPath: string
Path to data files or directory, specified as either a local filesystem path or in a URI-like format,
including scheme. A dataPath argument may include a single '*' wildcard character in the filename.
ext: string, optional, default "tif"
Extension required on data files to be loaded.
blockSize: string formatted as e.g. "64M", "512k", "2G", or positive int. optional, default "150M"
Requested size of Series partitions in bytes (or kilobytes, megabytes, gigabytes).
newDtype: dtype or dtype specifier or string 'smallfloat' or None, optional, default 'smallfloat'
Numpy dtype of output series data. Most methods expect Series data to be floating-point. Input data will be
cast to the requested `newdtype` if not None - see Data `astype()` method.
casting: 'no'|'equiv'|'safe'|'same_kind'|'unsafe', optional, default 'safe'
Casting method to pass on to numpy's `astype()` method; see numpy documentation for details.
startIdx, stopIdx: nonnegative int. optional.
Indices of the first and last-plus-one data file to load, relative to the sorted filenames matching
`dataPath` and `ext`. Interpreted according to python slice indexing conventions.
recursive: boolean, default False
If true, will recursively descend directories rooted at dataPath, loading all files in the tree that
have an extension matching 'ext'. Recursive loading is currently only implemented for local filesystems
(not s3).
"""
seriesBlocks, metadata = self._getSeriesBlocksFromMultiTif(dataPath, ext=ext, blockSize=blockSize,
newDtype=newDtype, casting=casting,
startIdx=startIdx, stopIdx=stopIdx,
recursive=recursive)
dims, npointsInSeries, dtype = metadata
return Series(seriesBlocks, dims=Dimensions.fromTuple(dims[::-1]), dtype=dtype,
index=arange(npointsInSeries))
def __saveSeriesRdd(self, seriesBlocks, outputDirPath, dims, npointsInSeries, dtype, overwrite=False):
if not overwrite:
self._checkOverwrite(outputDirPath)
overwrite = True # prevent additional downstream checks for this path
writer = getParallelWriterForPath(outputDirPath)(outputDirPath, overwrite=overwrite,
awsCredentialsOverride=self.awsCredentialsOverride)
def blockToBinarySeries(kvIter):
label = None
keyPacker = None
buf = StringIO()
for seriesKey, series in kvIter:
if keyPacker is None:
keyPacker = struct.Struct('h'*len(seriesKey))
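                    # keys are packed as int16 ('h'), matching the default keytype that writeSeriesConfig records in conf.json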
label = SimpleBlocks.getBinarySeriesNameForKey(seriesKey) + ".bin"
buf.write(keyPacker.pack(*seriesKey))
buf.write(series.tostring())
val = buf.getvalue()
buf.close()
return [(label, val)]
seriesBlocks.mapPartitions(blockToBinarySeries).foreach(writer.writerFcn)
writeSeriesConfig(outputDirPath, len(dims), npointsInSeries, valueType=dtype, overwrite=overwrite,
awsCredentialsOverride=self.awsCredentialsOverride)
def saveFromStack(self, dataPath, outputDirPath, dims, ext="stack", blockSize="150M", dtype='int16',
newDtype=None, casting='safe', startIdx=None, stopIdx=None, overwrite=False, recursive=False):
"""Write out data from binary image stack files in the Series data flat binary format.
Parameters
----------
dataPath: string
Path to data files or directory, specified as either a local filesystem path or in a URI-like format,
including scheme. A dataPath argument may include a single '*' wildcard character in the filename.
outputDirPath: string
Path to a directory into which to write Series file output. An outputdir argument may be either a path
on the local file system or a URI-like format, as in dataPath.
dims: tuple of positive int
Dimensions of input image data, ordered with the fastest-changing dimension first.
ext: string, optional, default "stack"
Extension required on data files to be loaded.
blockSize: string formatted as e.g. "64M", "512k", "2G", or positive int. optional, default "150M"
Requested size of Series partitions in bytes (or kilobytes, megabytes, gigabytes).
dtype: dtype or dtype specifier, optional, default 'int16'
Numpy dtype of input stack data
newDtype: floating-point dtype or dtype specifier or string 'smallfloat' or None, optional, default None
Numpy dtype of output series binary data. Input data will be cast to the requested `newdtype` if not None
- see Data `astype()` method.
casting: 'no'|'equiv'|'safe'|'same_kind'|'unsafe', optional, default 'safe'
Casting method to pass on to numpy's `astype()` method; see numpy documentation for details.
startIdx, stopIdx: nonnegative int. optional.
Indices of the first and last-plus-one data file to load, relative to the sorted filenames matching
`dataPath` and `ext`. Interpreted according to python slice indexing conventions.
overwrite: boolean, optional, default False
If true, the directory specified by outputdirpath will first be deleted, along with all its contents, if it
already exists. If false, a ValueError will be thrown if outputdirpath is found to already exist.
"""
if not overwrite:
self._checkOverwrite(outputDirPath)
overwrite = True # prevent additional downstream checks for this path
seriesBlocks, npointsInSeries, newDtype = \
self._getSeriesBlocksFromStack(dataPath, dims, ext=ext, blockSize=blockSize, dtype=dtype,
newDtype=newDtype, casting=casting, startIdx=startIdx, stopIdx=stopIdx,
recursive=recursive)
self.__saveSeriesRdd(seriesBlocks, outputDirPath, dims, npointsInSeries, newDtype, overwrite=overwrite)
def saveFromTif(self, dataPath, outputDirPath, ext="tif", blockSize="150M",
newDtype=None, casting='safe', startIdx=None, stopIdx=None,
overwrite=False, recursive=False):
"""Write out data from multipage tif files in the Series data flat binary format.
Parameters
----------
dataPath: string
Path to data files or directory, specified as either a local filesystem path or in a URI-like format,
including scheme. A dataPath argument may include a single '*' wildcard character in the filename.
        outputDirPath: string
Path to a directory into which to write Series file output. An outputdir argument may be either a path
on the local file system or a URI-like format, as in dataPath.
ext: string, optional, default "stack"
Extension required on data files to be loaded.
blockSize: string formatted as e.g. "64M", "512k", "2G", or positive int. optional, default "150M"
Requested size of Series partitions in bytes (or kilobytes, megabytes, gigabytes).
newDtype: floating-point dtype or dtype specifier or string 'smallfloat' or None, optional, default None
Numpy dtype of output series binary data. Input data will be cast to the requested `newdtype` if not None
- see Data `astype()` method.
casting: 'no'|'equiv'|'safe'|'same_kind'|'unsafe', optional, default 'safe'
Casting method to pass on to numpy's `astype()` method; see numpy documentation for details.
startIdx, stopIdx: nonnegative int. optional.
Indices of the first and last-plus-one data file to load, relative to the sorted filenames matching
`dataPath` and `ext`. Interpreted according to python slice indexing conventions.
overwrite: boolean, optional, default False
If true, the directory specified by outputdirpath will first be deleted, along with all its contents, if it
already exists. If false, a ValueError will be thrown if outputdirpath is found to already exist.
"""
if not overwrite:
self._checkOverwrite(outputDirPath)
overwrite = True # prevent additional downstream checks for this path
seriesBlocks, metadata = self._getSeriesBlocksFromMultiTif(dataPath, ext=ext, blockSize=blockSize,
newDtype=newDtype, casting=casting,
startIdx=startIdx, stopIdx=stopIdx,
recursive=recursive)
dims, npointsInSeries, dtype = metadata
self.__saveSeriesRdd(seriesBlocks, outputDirPath, dims, npointsInSeries, dtype, overwrite=overwrite)
def fromMatLocal(self, dataPath, varName, keyFile=None):
"""Loads Series data stored in a Matlab .mat file.
        `dataPath` must refer to a path visible to all workers, such as on NFS or similar mounted shared filesystem.
"""
data = loadmat(dataPath)[varName]
if data.ndim > 2:
raise IOError('Input data must be one or two dimensional')
if keyFile:
keys = map(lambda x: tuple(x), loadmat(keyFile)['keys'])
else:
keys = arange(0, data.shape[0])
rdd = Series(self.sc.parallelize(zip(keys, data), self.minPartitions), dtype=str(data.dtype))
return rdd
def fromNpyLocal(self, dataPath, keyFile=None):
"""Loads Series data stored in the numpy save() .npy format.
        `dataPath` must refer to a path visible to all workers, such as on NFS or similar mounted shared filesystem.
"""
data = load(dataPath)
if data.ndim > 2:
raise IOError('Input data must be one or two dimensional')
if keyFile:
keys = map(lambda x: tuple(x), load(keyFile))
else:
keys = arange(0, data.shape[0])
rdd = Series(self.sc.parallelize(zip(keys, data), self.minPartitions), dtype=str(data.dtype))
return rdd
def loadConf(self, dataPath, confFilename='conf.json'):
"""Returns a dict loaded from a json file.
Looks for file named `conffile` in same directory as `dataPath`
Returns {} if file not found
"""
if not confFilename:
return {}
reader = getFileReaderForPath(dataPath)(awsCredentialsOverride=self.awsCredentialsOverride)
try:
jsonBuf = reader.read(dataPath, filename=confFilename)
except FileNotFoundError:
return {}
params = json.loads(jsonBuf)
if 'format' in params:
raise Exception("Numerical format of value should be specified as 'valuetype', not 'format'")
if 'keyformat' in params:
raise Exception("Numerical format of key should be specified as 'keytype', not 'keyformat'")
return params
def writeSeriesConfig(outputDirPath, nkeys, nvalues, keyType='int16', valueType='int16',
confFilename="conf.json", overwrite=True, awsCredentialsOverride=None):
"""
Helper function to write out a conf.json file with required information to load Series binary data.
"""
import json
from thunder.rdds.fileio.writers import getFileWriterForPath
filewriterClass = getFileWriterForPath(outputDirPath)
# write configuration file
# config JSON keys are lowercased "valuetype", "keytype", not valueType, keyType
conf = {'input': outputDirPath,
'nkeys': nkeys, 'nvalues': nvalues,
'valuetype': str(valueType), 'keytype': str(keyType)}
confWriter = filewriterClass(outputDirPath, confFilename, overwrite=overwrite,
awsCredentialsOverride=awsCredentialsOverride)
confWriter.writeFile(json.dumps(conf, indent=2))
# touch "SUCCESS" file as final action
successWriter = filewriterClass(outputDirPath, "SUCCESS", overwrite=overwrite,
awsCredentialsOverride=awsCredentialsOverride)
successWriter.writeFile('')
| """Provides SeriesLoader object and helpers, used to read Series data from disk or other filesystems.
"""
from collections import namedtuple
import json
from numpy import array, arange, frombuffer, load, ndarray, unravel_index, vstack
from numpy import dtype as dtypeFunc
from scipy.io import loadmat
from cStringIO import StringIO
import itertools
import struct
import urlparse
import math
from thunder.rdds.fileio.writers import getParallelWriterForPath
from thunder.rdds.keys import Dimensions
from thunder.rdds.fileio.readers import getFileReaderForPath, FileNotFoundError, appendExtensionToPathSpec
from thunder.rdds.imgblocks.blocks import SimpleBlocks
from thunder.rdds.series import Series
from thunder.utils.common import parseMemoryString, smallestFloatType
class SeriesLoader(object):
"""Loader object used to instantiate Series data stored in a variety of formats.
"""
def __init__(self, sparkContext, minPartitions=None):
"""Initialize a new SeriesLoader object.
Parameters
----------
sparkcontext: SparkContext
The pyspark SparkContext object used by the current Thunder environment.
minPartitions: int
minimum number of partitions to use when loading data. (Used by fromText, fromMatLocal, and fromNpyLocal)
"""
from thunder.utils.aws import AWSCredentials
self.sc = sparkContext
self.minPartitions = minPartitions
self.awsCredentialsOverride = AWSCredentials.fromContext(sparkContext)
def _checkOverwrite(self, outputDirPath):
from thunder.utils.common import raiseErrorIfPathExists
raiseErrorIfPathExists(outputDirPath, awsCredentialsOverride=self.awsCredentialsOverride)
def fromArrays(self, arrays, npartitions=None):
"""
Create a Series object from a sequence of 1d numpy arrays on the driver.
"""
# recast singleton
if isinstance(arrays, ndarray):
arrays = [arrays]
# check shape and dtype
shape = arrays[0].shape
dtype = arrays[0].dtype
for ary in arrays:
if not ary.shape == shape:
raise ValueError("Inconsistent array shapes: first array had shape %s, but other array has shape %s" %
(str(shape), str(ary.shape)))
if not ary.dtype == dtype:
raise ValueError("Inconsistent array dtypes: first array had dtype %s, but other array has dtype %s" %
(str(dtype), str(ary.dtype)))
# generate linear keys
keys = map(lambda k: (k,), xrange(0, len(arrays)))
return Series(self.sc.parallelize(zip(keys, arrays), npartitions), dtype=str(dtype))
def fromArraysAsImages(self, arrays):
"""Create a Series object from a sequence of numpy ndarrays resident in memory on the driver.
The arrays will be interpreted as though each represents a single time point - effectively the same
as if converting Images to a Series, with each array representing a volume image at a particular
point in time. Thus in the resulting Series, the value of the record with key (0,0,0) will be
array([arrays[0][0,0,0], arrays[1][0,0,0],... arrays[n][0,0,0]).
The dimensions of the resulting Series will be *opposite* that of the passed numpy array. Their dtype will not
be changed.
"""
# if passed a single array, cast it to a sequence of length 1
if isinstance(arrays, ndarray):
arrays = [arrays]
# check that shapes of passed arrays are consistent
shape = arrays[0].shape
dtype = arrays[0].dtype
for ary in arrays:
if not ary.shape == shape:
raise ValueError("Inconsistent array shapes: first array had shape %s, but other array has shape %s" %
(str(shape), str(ary.shape)))
if not ary.dtype == dtype:
raise ValueError("Inconsistent array dtypes: first array had dtype %s, but other array has dtype %s" %
(str(dtype), str(ary.dtype)))
# get indices so that fastest index changes first
shapeiters = (xrange(n) for n in shape)
keys = [idx[::-1] for idx in itertools.product(*shapeiters)]
values = vstack([ary.ravel() for ary in arrays]).T
dims = Dimensions.fromTuple(shape[::-1])
return Series(self.sc.parallelize(zip(keys, values), self.minPartitions), dims=dims, dtype=str(dtype))
@staticmethod
def __normalizeDatafilePattern(dataPath, ext):
dataPath = appendExtensionToPathSpec(dataPath, ext)
# we do need to prepend a scheme here, b/c otherwise the Hadoop based readers
# will adopt their default behavior and start looking on hdfs://.
parseResult = urlparse.urlparse(dataPath)
if parseResult.scheme:
# this appears to already be a fully-qualified URI
return dataPath
else:
# this looks like a local path spec
# check whether we look like an absolute or a relative path
import os
dirComponent, fileComponent = os.path.split(dataPath)
if not os.path.isabs(dirComponent):
# need to make relative local paths absolute; our file scheme parsing isn't all that it could be.
dirComponent = os.path.abspath(dirComponent)
dataPath = os.path.join(dirComponent, fileComponent)
return "file://" + dataPath
def fromText(self, dataPath, nkeys=None, ext="txt", dtype='float64'):
"""
Loads Series data from text files.
Parameters
----------
dataPath : string
Specifies the file or files to be loaded. dataPath may be either a URI (with scheme specified) or a path
on the local filesystem.
If a path is passed (determined by the absence of a scheme component when attempting to parse as a URI),
and it is not already a wildcard expression and does not end in <ext>, then it will be converted into a
wildcard pattern by appending '/*.ext'. This conversion can be avoided by passing a "file://" URI.
dtype: dtype or dtype specifier, default 'float64'
"""
dataPath = self.__normalizeDatafilePattern(dataPath, ext)
def parse(line, nkeys_):
vec = [float(x) for x in line.split(' ')]
ts = array(vec[nkeys_:], dtype=dtype)
keys = tuple(int(x) for x in vec[:nkeys_])
return keys, ts
lines = self.sc.textFile(dataPath, self.minPartitions)
data = lines.map(lambda x: parse(x, nkeys))
return Series(data, dtype=str(dtype))
    # keytype, valuetype here violate camelCasing convention for consistency with JSON conf file format
BinaryLoadParameters = namedtuple('BinaryLoadParameters', 'nkeys nvalues keytype valuetype')
BinaryLoadParameters.__new__.__defaults__ = (None, None, 'int16', 'int16')
def __loadParametersAndDefaults(self, dataPath, confFilename, nkeys, nvalues, keyType, valueType):
"""Collects parameters to use for binary series loading.
Priority order is as follows:
1. parameters specified as keyword arguments;
2. parameters specified in a conf.json file on the local filesystem;
3. default parameters
Returns
-------
BinaryLoadParameters instance
"""
params = self.loadConf(dataPath, confFilename=confFilename)
# filter dict to include only recognized field names:
for k in params.keys():
if k not in SeriesLoader.BinaryLoadParameters._fields:
del params[k]
keywordParams = {'nkeys': nkeys, 'nvalues': nvalues, 'keytype': keyType, 'valuetype': valueType}
for k, v in keywordParams.items():
if not v:
del keywordParams[k]
params.update(keywordParams)
return SeriesLoader.BinaryLoadParameters(**params)
@staticmethod
def __checkBinaryParametersAreSpecified(paramsObj):
"""Throws ValueError if any of the field values in the passed namedtuple instance evaluate to False.
Note this is okay only so long as zero is not a valid parameter value. Hmm.
"""
missing = []
for paramName, paramVal in paramsObj._asdict().iteritems():
if not paramVal:
missing.append(paramName)
if missing:
raise ValueError("Missing parameters to load binary series files - " +
"these must be given either as arguments or in a configuration file: " +
str(tuple(missing)))
def fromBinary(self, dataPath, ext='bin', confFilename='conf.json',
nkeys=None, nvalues=None, keyType=None, valueType=None,
newDtype='smallfloat', casting='safe', maxPartitionSize='32mb'):
"""
Load a Series object from a directory of binary files.
Parameters
----------
dataPath : string URI or local filesystem path
Specifies the directory or files to be loaded. May be formatted as a URI string with scheme (e.g. "file://",
"s3n://", or "gs://"). If no scheme is present, will be interpreted as a path on the local filesystem. This path
must be valid on all workers. Datafile may also refer to a single file, or to a range of files specified
by a glob-style expression using a single wildcard character '*'.
newDtype : dtype or dtype specifier or string 'smallfloat' or None, optional, default 'smallfloat'
Numpy dtype of output series data. Most methods expect Series data to be floating-point. Input data will be
cast to the requested `newdtype` if not None - see Data `astype()` method.
casting : 'no'|'equiv'|'safe'|'same_kind'|'unsafe', optional, default 'safe'
Casting method to pass on to numpy's `astype()` method; see numpy documentation for details.
maxPartitionSize : str, optional, default = '32mb'
Maximum size of partitions as Java-style memory, will indirectly control the number of partitions
"""
paramsObj = self.__loadParametersAndDefaults(dataPath, confFilename, nkeys, nvalues, keyType, valueType)
self.__checkBinaryParametersAreSpecified(paramsObj)
dataPath = self.__normalizeDatafilePattern(dataPath, ext)
keyDtype = dtypeFunc(paramsObj.keytype)
valDtype = dtypeFunc(paramsObj.valuetype)
keySize = paramsObj.nkeys * keyDtype.itemsize
recordSize = keySize + paramsObj.nvalues * valDtype.itemsize
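        # each fixed-length record is the packed keys (nkeys entries) followed immediately by the packed values (nvalues entries)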
from thunder.utils.common import parseMemoryString
if isinstance(maxPartitionSize, basestring):
size = parseMemoryString(maxPartitionSize)
else:
raise Exception("Invalid size specification")
hadoopConf = {'recordLength': str(recordSize), 'mapred.max.split.size': str(size)}
lines = self.sc.newAPIHadoopFile(dataPath, 'thunder.util.io.hadoop.FixedLengthBinaryInputFormat',
'org.apache.hadoop.io.LongWritable',
'org.apache.hadoop.io.BytesWritable',
conf=hadoopConf)
data = lines.map(lambda (_, v):
(tuple(int(x) for x in frombuffer(buffer(v, 0, keySize), dtype=keyDtype)),
frombuffer(buffer(v, keySize), dtype=valDtype)))
return Series(data, dtype=str(valDtype), index=arange(paramsObj.nvalues)).astype(newDtype, casting)
def _getSeriesBlocksFromStack(self, dataPath, dims, ext="stack", blockSize="150M", dtype='int16',
newDtype='smallfloat', casting='safe', startIdx=None, stopIdx=None, recursive=False):
"""Create an RDD of <string blocklabel, (int k-tuple indices, array of datatype values)>
Parameters
----------
dataPath: string URI or local filesystem path
Specifies the directory or files to be loaded. May be formatted as a URI string with scheme (e.g. "file://",
"s3n://" or "gs://"). If no scheme is present, will be interpreted as a path on the local filesystem. This path
must be valid on all workers. Datafile may also refer to a single file, or to a range of files specified
by a glob-style expression using a single wildcard character '*'.
dims: tuple of positive int
Dimensions of input image data, ordered with the fastest-changing dimension first.
dtype: dtype or dtype specifier, optional, default 'int16'
Numpy dtype of input stack data
newDtype: floating-point dtype or dtype specifier or string 'smallfloat' or None, optional, default 'smallfloat'
Numpy dtype of output series data. Series data must be floating-point. Input data will be cast to the
requested `newdtype` - see numpy `astype()` method.
casting: 'no'|'equiv'|'safe'|'same_kind'|'unsafe', optional, default 'safe'
Casting method to pass on to numpy's `astype()` method; see numpy documentation for details.
recursive: boolean, default False
If true, will recursively descend directories rooted at dataPath, loading all files in the tree that
have an extension matching 'ext'. Recursive loading is currently only implemented for local filesystems
(not s3).
Returns
---------
pair of (RDD, ntimepoints)
RDD: sequence of keys, values pairs
(call using flatMap)
RDD Key: tuple of int
                zero-based indices of position within original image volume
RDD Value: numpy array of datatype
series of values at position across loaded image volumes
ntimepoints: int
number of time points in returned series, determined from number of stack files found at dataPath
newDtype: string
string representation of numpy data type of returned blocks
"""
dataPath = self.__normalizeDatafilePattern(dataPath, ext)
blockSize = parseMemoryString(blockSize)
totalDim = reduce(lambda x_, y_: x_*y_, dims)
dtype = dtypeFunc(dtype)
if newDtype is None or newDtype == '':
newDtype = str(dtype)
elif newDtype == 'smallfloat':
newDtype = str(smallestFloatType(dtype))
else:
newDtype = str(newDtype)
reader = getFileReaderForPath(dataPath)(awsCredentialsOverride=self.awsCredentialsOverride)
filenames = reader.list(dataPath, startIdx=startIdx, stopIdx=stopIdx, recursive=recursive)
if not filenames:
raise IOError("No files found for path '%s'" % dataPath)
dataSize = totalDim * len(filenames) * dtype.itemsize
nblocks = max(dataSize / blockSize, 1) # integer division
if len(dims) >= 3:
# for 3D stacks, do calculations to ensure that
            # different planes appear in distinct blocks
blocksPerPlane = max(nblocks / dims[-1], 1)
pixPerPlane = reduce(lambda x_, y_: x_*y_, dims[:-1]) # all but last dimension
# get the greatest number of blocks in a plane (up to as many as requested) that still divide the plane
# evenly. This will always be at least one.
kUpdated = [x for x in range(1, blocksPerPlane+1) if not pixPerPlane % x][-1]
nblocks = kUpdated * dims[-1]
blockSizePerStack = (totalDim / nblocks) * dtype.itemsize
else:
# otherwise just round to make contents divide into nearly even blocks
blockSizePerStack = int(math.ceil(totalDim / float(nblocks)))
nblocks = int(math.ceil(totalDim / float(blockSizePerStack)))
blockSizePerStack *= dtype.itemsize
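        # Added worked example (assumed numbers): for dims=(512, 512, 4), 100 int16 stack files and
        # the default 150M blockSize, dataSize = 512*512*4*100*2 bytes (~210 MB), so nblocks starts
        # at 1 and the 3-D branch above rounds it up to one block per plane (nblocks = 4), ensuring
        # that no block ever spans a plane boundary.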
fileSize = totalDim * dtype.itemsize
def readBlock(blockNum):
# copy size out from closure; will modify later:
blockSizePerStack_ = blockSizePerStack
# get start position for this block
position = blockNum * blockSizePerStack_
# adjust if at end of file
if (position + blockSizePerStack_) > fileSize:
blockSizePerStack_ = int(fileSize - position)
# loop over files, loading one block from each
bufs = []
for fname in filenames:
buf = reader.read(fname, startOffset=position, size=blockSizePerStack_)
bufs.append(frombuffer(buf, dtype=dtype))
buf = vstack(bufs).T # dimensions are now linindex x time (images)
del bufs
buf = buf.astype(newDtype, casting=casting, copy=False)
# append subscript keys based on dimensions
itemPosition = position / dtype.itemsize
itemBlocksize = blockSizePerStack_ / dtype.itemsize
linearIdx = arange(itemPosition, itemPosition + itemBlocksize) # zero-based
keys = zip(*map(tuple, unravel_index(linearIdx, dims, order='F')))
return zip(keys, buf)
# map over blocks
return (self.sc.parallelize(range(0, nblocks), nblocks).flatMap(lambda bn: readBlock(bn)),
len(filenames), newDtype)
@staticmethod
def __readMetadataFromFirstPageOfMultiTif(reader, filePath):
import thunder.rdds.fileio.multitif as multitif
# read first page of first file to get expected image size
tiffFP = reader.open(filePath)
tiffParser = multitif.TiffParser(tiffFP, debug=False)
tiffHeaders = multitif.TiffData()
tiffParser.parseFileHeader(destinationTiff=tiffHeaders)
firstIfd = tiffParser.parseNextImageFileDirectory(destinationTiff=tiffHeaders)
if not firstIfd.isLuminanceImage():
raise ValueError(("File %s does not appear to be a luminance " % filePath) +
"(greyscale or bilevel) TIF image, " +
"which are the only types currently supported")
# keep reading pages until we reach the end of the file, in order to get number of planes:
while tiffParser.parseNextImageFileDirectory(destinationTiff=tiffHeaders):
pass
# get dimensions
npages = len(tiffHeaders.ifds)
height = firstIfd.getImageHeight()
width = firstIfd.getImageWidth()
# get datatype
bitsPerSample = firstIfd.getBitsPerSample()
if not (bitsPerSample in (8, 16, 32, 64)):
raise ValueError("Only 8, 16, 32, or 64 bit per pixel TIF images are supported, got %d" % bitsPerSample)
sampleFormat = firstIfd.getSampleFormat()
if sampleFormat == multitif.SAMPLE_FORMAT_UINT:
dtStr = 'uint'
elif sampleFormat == multitif.SAMPLE_FORMAT_INT:
dtStr = 'int'
elif sampleFormat == multitif.SAMPLE_FORMAT_FLOAT:
dtStr = 'float'
else:
raise ValueError("Unknown TIF SampleFormat tag value %d, should be 1, 2, or 3 for uint, int, or float"
% sampleFormat)
dtype = dtStr+str(bitsPerSample)
return height, width, npages, dtype
def _getSeriesBlocksFromMultiTif(self, dataPath, ext="tif", blockSize="150M",
newDtype='smallfloat', casting='safe', startIdx=None, stopIdx=None,
recursive=False):
import thunder.rdds.fileio.multitif as multitif
import itertools
from PIL import Image
import io
dataPath = self.__normalizeDatafilePattern(dataPath, ext)
blockSize = parseMemoryString(blockSize)
reader = getFileReaderForPath(dataPath)(awsCredentialsOverride=self.awsCredentialsOverride)
filenames = reader.list(dataPath, startIdx=startIdx, stopIdx=stopIdx, recursive=recursive)
if not filenames:
raise IOError("No files found for path '%s'" % dataPath)
ntimepoints = len(filenames)
doMinimizeReads = dataPath.lower().startswith("s3") or dataPath.lower().startswith("gs")
# check PIL version to see whether it is actually pillow or indeed old PIL and choose
# conversion function appropriately. See ImagesLoader.fromMultipageTif and common.pil_to_array
# for more explanation.
isPillow = hasattr(Image, "PILLOW_VERSION")
if isPillow:
conversionFcn = array # use numpy's array() function
else:
from thunder.utils.common import pil_to_array
conversionFcn = pil_to_array # use our modified version of matplotlib's pil_to_array
height, width, npages, dtype = SeriesLoader.__readMetadataFromFirstPageOfMultiTif(reader, filenames[0])
if dtype.startswith('int'):
raise ValueError('Signed integer tiff images are not supported in SeriesLoader (shuffle=False);' +
' please try loading as Images (shuffle=True)')
pixelBytesize = dtypeFunc(dtype).itemsize
if newDtype is None or str(newDtype) == '':
newDtype = str(dtype)
elif newDtype == 'smallfloat':
newDtype = str(smallestFloatType(dtype))
else:
newDtype = str(newDtype)
        # initialize at one block per plane
bytesPerPlane = height * width * pixelBytesize * ntimepoints
bytesPerBlock = bytesPerPlane
blocksPerPlane = 1
# keep dividing while cutting our size in half still leaves us bigger than the requested size
# should end up no more than 2x blockSize.
while bytesPerBlock >= blockSize * 2:
bytesPerBlock /= 2
blocksPerPlane *= 2
blocklenPixels = max((height * width) / blocksPerPlane, 1) # integer division
while blocksPerPlane * blocklenPixels < height * width: # make sure we're reading the plane fully
blocksPerPlane += 1
# prevent bringing in self in closure:
awsCredentialsOverride = self.awsCredentialsOverride
# keys will be planeidx, blockidx:
keys = list(itertools.product(xrange(npages), xrange(blocksPerPlane)))
def readBlockFromTiff(planeIdxBlockIdx):
planeIdx, blockIdx = planeIdxBlockIdx
blocks = []
planeShape = None
blockStart = None
blockEnd = None
for fname in filenames:
reader_ = getFileReaderForPath(fname)(awsCredentialsOverride=awsCredentialsOverride)
fp = reader_.open(fname)
try:
if doMinimizeReads:
# use multitif module to generate a fake, in-memory
# one-page tif file. the advantage of this is that it
# cuts way down on the many small reads that PIL/pillow
# will make otherwise, which would be a problem for s3
# or Google Storage
tiffParser_ = multitif.TiffParser(fp, debug=False)
tiffFilebuffer = multitif.packSinglePage(tiffParser_, pageIdx=planeIdx)
byteBuf = io.BytesIO(tiffFilebuffer)
try:
pilImg = Image.open(byteBuf)
ary = conversionFcn(pilImg).T
finally:
byteBuf.close()
del tiffFilebuffer, tiffParser_, pilImg, byteBuf
else:
# read tif using PIL directly
pilImg = Image.open(fp)
pilImg.seek(planeIdx)
ary = conversionFcn(pilImg).T
del pilImg
if not planeShape:
planeShape = ary.shape[:]
blockStart = blockIdx * blocklenPixels
blockEnd = min(blockStart+blocklenPixels, planeShape[0]*planeShape[1])
blocks.append(ary.ravel(order='C')[blockStart:blockEnd])
del ary
finally:
fp.close()
buf = vstack(blocks).T # dimensions are now linindex x time (images)
del blocks
buf = buf.astype(newDtype, casting=casting, copy=False)
# append subscript keys based on dimensions
linearIdx = arange(blockStart, blockEnd) # zero-based
seriesKeys = zip(*map(tuple, unravel_index(linearIdx, planeShape, order='C')))
# add plane index to end of keys
if npages > 1:
seriesKeys = [tuple(list(keys_)[::-1]+[planeIdx]) for keys_ in seriesKeys]
else:
seriesKeys = [tuple(list(keys_)[::-1]) for keys_ in seriesKeys]
return zip(seriesKeys, buf)
# map over blocks
rdd = self.sc.parallelize(keys, len(keys)).flatMap(readBlockFromTiff)
if npages > 1:
dims = (npages, width, height)
else:
dims = (width, height)
metadata = (dims, ntimepoints, newDtype)
return rdd, metadata
def fromStack(self, dataPath, dims, ext="stack", blockSize="150M", dtype='int16',
newDtype='smallfloat', casting='safe', startIdx=None, stopIdx=None, recursive=False):
"""Load a Series object directly from binary image stack files.
Parameters
----------
dataPath: string
Path to data files or directory, specified as either a local filesystem path or in a URI-like format,
including scheme. A dataPath argument may include a single '*' wildcard character in the filename.
dims: tuple of positive int
Dimensions of input image data, ordered with the fastest-changing dimension first.
ext: string, optional, default "stack"
Extension required on data files to be loaded.
blockSize: string formatted as e.g. "64M", "512k", "2G", or positive int. optional, default "150M"
Requested size of Series partitions in bytes (or kilobytes, megabytes, gigabytes).
dtype: dtype or dtype specifier, optional, default 'int16'
Numpy dtype of input stack data
newDtype: dtype or dtype specifier or string 'smallfloat' or None, optional, default 'smallfloat'
Numpy dtype of output series data. Most methods expect Series data to be floating-point. Input data will be
cast to the requested `newdtype` if not None - see Data `astype()` method.
casting: 'no'|'equiv'|'safe'|'same_kind'|'unsafe', optional, default 'safe'
Casting method to pass on to numpy's `astype()` method; see numpy documentation for details.
startIdx, stopIdx: nonnegative int. optional.
Indices of the first and last-plus-one data file to load, relative to the sorted filenames matching
`dataPath` and `ext`. Interpreted according to python slice indexing conventions.
recursive: boolean, default False
If true, will recursively descend directories rooted at dataPath, loading all files in the tree that
have an extension matching 'ext'. Recursive loading is currently only implemented for local filesystems
(not s3).
"""
seriesBlocks, npointsInSeries, newDtype = \
self._getSeriesBlocksFromStack(dataPath, dims, ext=ext, blockSize=blockSize, dtype=dtype,
newDtype=newDtype, casting=casting, startIdx=startIdx, stopIdx=stopIdx,
recursive=recursive)
return Series(seriesBlocks, dims=dims, dtype=newDtype, index=arange(npointsInSeries))
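    # Added usage sketch (comment only; the path and dimensions are assumptions):
    #     series = SeriesLoader(sc).fromStack('/data/stacks', dims=(512, 512, 4), dtype='int16')
    # Each .stack file is treated as one time point, and dims must list the fastest-changing
    # dimension first so the flat binary layout is unraveled correctly.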
def fromTif(self, dataPath, ext="tif", blockSize="150M", newDtype='smallfloat', casting='safe',
startIdx=None, stopIdx=None, recursive=False):
"""Load a Series object from multipage tiff files.
Parameters
----------
dataPath: string
Path to data files or directory, specified as either a local filesystem path or in a URI-like format,
including scheme. A dataPath argument may include a single '*' wildcard character in the filename.
ext: string, optional, default "tif"
Extension required on data files to be loaded.
blockSize: string formatted as e.g. "64M", "512k", "2G", or positive int. optional, default "150M"
Requested size of Series partitions in bytes (or kilobytes, megabytes, gigabytes).
newDtype: dtype or dtype specifier or string 'smallfloat' or None, optional, default 'smallfloat'
Numpy dtype of output series data. Most methods expect Series data to be floating-point. Input data will be
cast to the requested `newdtype` if not None - see Data `astype()` method.
casting: 'no'|'equiv'|'safe'|'same_kind'|'unsafe', optional, default 'safe'
Casting method to pass on to numpy's `astype()` method; see numpy documentation for details.
startIdx, stopIdx: nonnegative int. optional.
Indices of the first and last-plus-one data file to load, relative to the sorted filenames matching
`dataPath` and `ext`. Interpreted according to python slice indexing conventions.
recursive: boolean, default False
If true, will recursively descend directories rooted at dataPath, loading all files in the tree that
have an extension matching 'ext'. Recursive loading is currently only implemented for local filesystems
(not s3).
"""
seriesBlocks, metadata = self._getSeriesBlocksFromMultiTif(dataPath, ext=ext, blockSize=blockSize,
newDtype=newDtype, casting=casting,
startIdx=startIdx, stopIdx=stopIdx,
recursive=recursive)
dims, npointsInSeries, dtype = metadata
return Series(seriesBlocks, dims=Dimensions.fromTuple(dims[::-1]), dtype=dtype,
index=arange(npointsInSeries))
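    # Added usage sketch (comment only; the path and indices are assumptions):
    #     series = SeriesLoader(sc).fromTif('/data/tifs', startIdx=0, stopIdx=100)
    # Each multipage TIF becomes one time point and its pages become the plane dimension;
    # signed-integer TIFs are rejected (see the ValueError raised in _getSeriesBlocksFromMultiTif
    # above), so those should be loaded as Images instead.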
def __saveSeriesRdd(self, seriesBlocks, outputDirPath, dims, npointsInSeries, dtype, overwrite=False):
if not overwrite:
self._checkOverwrite(outputDirPath)
overwrite = True # prevent additional downstream checks for this path
writer = getParallelWriterForPath(outputDirPath)(outputDirPath, overwrite=overwrite,
awsCredentialsOverride=self.awsCredentialsOverride)
def blockToBinarySeries(kvIter):
label = None
keyPacker = None
buf = StringIO()
for seriesKey, series in kvIter:
if keyPacker is None:
keyPacker = struct.Struct('h'*len(seriesKey))
label = SimpleBlocks.getBinarySeriesNameForKey(seriesKey) + ".bin"
buf.write(keyPacker.pack(*seriesKey))
buf.write(series.tostring())
val = buf.getvalue()
buf.close()
return [(label, val)]
seriesBlocks.mapPartitions(blockToBinarySeries).foreach(writer.writerFcn)
writeSeriesConfig(outputDirPath, len(dims), npointsInSeries, valueType=dtype, overwrite=overwrite,
awsCredentialsOverride=self.awsCredentialsOverride)
def saveFromStack(self, dataPath, outputDirPath, dims, ext="stack", blockSize="150M", dtype='int16',
newDtype=None, casting='safe', startIdx=None, stopIdx=None, overwrite=False, recursive=False):
"""Write out data from binary image stack files in the Series data flat binary format.
Parameters
----------
dataPath: string
Path to data files or directory, specified as either a local filesystem path or in a URI-like format,
including scheme. A dataPath argument may include a single '*' wildcard character in the filename.
outputDirPath: string
Path to a directory into which to write Series file output. An outputdir argument may be either a path
on the local file system or a URI-like format, as in dataPath.
dims: tuple of positive int
Dimensions of input image data, ordered with the fastest-changing dimension first.
ext: string, optional, default "stack"
Extension required on data files to be loaded.
blockSize: string formatted as e.g. "64M", "512k", "2G", or positive int. optional, default "150M"
Requested size of Series partitions in bytes (or kilobytes, megabytes, gigabytes).
dtype: dtype or dtype specifier, optional, default 'int16'
Numpy dtype of input stack data
newDtype: floating-point dtype or dtype specifier or string 'smallfloat' or None, optional, default None
Numpy dtype of output series binary data. Input data will be cast to the requested `newdtype` if not None
- see Data `astype()` method.
casting: 'no'|'equiv'|'safe'|'same_kind'|'unsafe', optional, default 'safe'
Casting method to pass on to numpy's `astype()` method; see numpy documentation for details.
startIdx, stopIdx: nonnegative int. optional.
Indices of the first and last-plus-one data file to load, relative to the sorted filenames matching
`dataPath` and `ext`. Interpreted according to python slice indexing conventions.
overwrite: boolean, optional, default False
If true, the directory specified by outputdirpath will first be deleted, along with all its contents, if it
already exists. If false, a ValueError will be thrown if outputdirpath is found to already exist.
"""
if not overwrite:
self._checkOverwrite(outputDirPath)
overwrite = True # prevent additional downstream checks for this path
seriesBlocks, npointsInSeries, newDtype = \
self._getSeriesBlocksFromStack(dataPath, dims, ext=ext, blockSize=blockSize, dtype=dtype,
newDtype=newDtype, casting=casting, startIdx=startIdx, stopIdx=stopIdx,
recursive=recursive)
self.__saveSeriesRdd(seriesBlocks, outputDirPath, dims, npointsInSeries, newDtype, overwrite=overwrite)
def saveFromTif(self, dataPath, outputDirPath, ext="tif", blockSize="150M",
newDtype=None, casting='safe', startIdx=None, stopIdx=None,
overwrite=False, recursive=False):
"""Write out data from multipage tif files in the Series data flat binary format.
Parameters
----------
dataPath: string
Path to data files or directory, specified as either a local filesystem path or in a URI-like format,
including scheme. A dataPath argument may include a single '*' wildcard character in the filename.
        outputDirPath: string
Path to a directory into which to write Series file output. An outputdir argument may be either a path
on the local file system or a URI-like format, as in dataPath.
        ext: string, optional, default "tif"
Extension required on data files to be loaded.
blockSize: string formatted as e.g. "64M", "512k", "2G", or positive int. optional, default "150M"
Requested size of Series partitions in bytes (or kilobytes, megabytes, gigabytes).
newDtype: floating-point dtype or dtype specifier or string 'smallfloat' or None, optional, default None
Numpy dtype of output series binary data. Input data will be cast to the requested `newdtype` if not None
- see Data `astype()` method.
casting: 'no'|'equiv'|'safe'|'same_kind'|'unsafe', optional, default 'safe'
Casting method to pass on to numpy's `astype()` method; see numpy documentation for details.
startIdx, stopIdx: nonnegative int. optional.
Indices of the first and last-plus-one data file to load, relative to the sorted filenames matching
`dataPath` and `ext`. Interpreted according to python slice indexing conventions.
overwrite: boolean, optional, default False
If true, the directory specified by outputdirpath will first be deleted, along with all its contents, if it
already exists. If false, a ValueError will be thrown if outputdirpath is found to already exist.
"""
if not overwrite:
self._checkOverwrite(outputDirPath)
overwrite = True # prevent additional downstream checks for this path
seriesBlocks, metadata = self._getSeriesBlocksFromMultiTif(dataPath, ext=ext, blockSize=blockSize,
newDtype=newDtype, casting=casting,
startIdx=startIdx, stopIdx=stopIdx,
recursive=recursive)
dims, npointsInSeries, dtype = metadata
self.__saveSeriesRdd(seriesBlocks, outputDirPath, dims, npointsInSeries, dtype, overwrite=overwrite)
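    # Added usage sketch (comment only; paths and dimensions are assumptions):
    #     SeriesLoader(sc).saveFromStack('/data/stacks', '/out/series', dims=(512, 512, 4))
    #     SeriesLoader(sc).saveFromTif('/data/tifs', '/out/series', overwrite=True)
    # Both write flat binary Series records plus a conf.json and SUCCESS marker (via
    # writeSeriesConfig), so the output directory can later be reloaded with fromBinary.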
def fromMatLocal(self, dataPath, varName, keyFile=None):
"""Loads Series data stored in a Matlab .mat file.
        `dataPath` must refer to a path visible to all workers, such as on NFS or similar mounted shared filesystem.
"""
data = loadmat(dataPath)[varName]
if data.ndim > 2:
raise IOError('Input data must be one or two dimensional')
if keyFile:
keys = map(lambda x: tuple(x), loadmat(keyFile)['keys'])
else:
keys = arange(0, data.shape[0])
rdd = Series(self.sc.parallelize(zip(keys, data), self.minPartitions), dtype=str(data.dtype))
return rdd
def fromNpyLocal(self, dataPath, keyFile=None):
"""Loads Series data stored in the numpy save() .npy format.
        `dataPath` must refer to a path visible to all workers, such as on NFS or similar mounted shared filesystem.
"""
data = load(dataPath)
if data.ndim > 2:
raise IOError('Input data must be one or two dimensional')
if keyFile:
keys = map(lambda x: tuple(x), load(keyFile))
else:
keys = arange(0, data.shape[0])
rdd = Series(self.sc.parallelize(zip(keys, data), self.minPartitions), dtype=str(data.dtype))
return rdd
def loadConf(self, dataPath, confFilename='conf.json'):
"""Returns a dict loaded from a json file.
        Looks for a file named `confFilename` in the same directory as `dataPath`.
        Returns {} if the file is not found.
"""
if not confFilename:
return {}
reader = getFileReaderForPath(dataPath)(awsCredentialsOverride=self.awsCredentialsOverride)
try:
jsonBuf = reader.read(dataPath, filename=confFilename)
except FileNotFoundError:
return {}
params = json.loads(jsonBuf)
if 'format' in params:
raise Exception("Numerical format of value should be specified as 'valuetype', not 'format'")
if 'keyformat' in params:
raise Exception("Numerical format of key should be specified as 'keytype', not 'keyformat'")
return params
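# Added example (illustrative values) of the conf.json that loadConf reads and writeSeriesConfig
# writes below; note the lowercase 'keytype'/'valuetype' keys ('format'/'keyformat' are rejected):
#     {
#       "input": "/out/series",
#       "nkeys": 3, "nvalues": 240,
#       "keytype": "int16", "valuetype": "float32"
#     }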
def writeSeriesConfig(outputDirPath, nkeys, nvalues, keyType='int16', valueType='int16',
confFilename="conf.json", overwrite=True, awsCredentialsOverride=None):
"""
Helper function to write out a conf.json file with required information to load Series binary data.
"""
import json
from thunder.rdds.fileio.writers import getFileWriterForPath
filewriterClass = getFileWriterForPath(outputDirPath)
# write configuration file
# config JSON keys are lowercased "valuetype", "keytype", not valueType, keyType
conf = {'input': outputDirPath,
'nkeys': nkeys, 'nvalues': nvalues,
'valuetype': str(valueType), 'keytype': str(keyType)}
confWriter = filewriterClass(outputDirPath, confFilename, overwrite=overwrite,
awsCredentialsOverride=awsCredentialsOverride)
confWriter.writeFile(json.dumps(conf, indent=2))
# touch "SUCCESS" file as final action
successWriter = filewriterClass(outputDirPath, "SUCCESS", overwrite=overwrite,
awsCredentialsOverride=awsCredentialsOverride)
successWriter.writeFile('') | en | 0.728846 | Provides SeriesLoader object and helpers, used to read Series data from disk or other filesystems. Loader object used to instantiate Series data stored in a variety of formats. Initialize a new SeriesLoader object. Parameters ---------- sparkcontext: SparkContext The pyspark SparkContext object used by the current Thunder environment. minPartitions: int minimum number of partitions to use when loading data. (Used by fromText, fromMatLocal, and fromNpyLocal) Create a Series object from a sequence of 1d numpy arrays on the driver. # recast singleton # check shape and dtype # generate linear keys Create a Series object from a sequence of numpy ndarrays resident in memory on the driver. The arrays will be interpreted as though each represents a single time point - effectively the same as if converting Images to a Series, with each array representing a volume image at a particular point in time. Thus in the resulting Series, the value of the record with key (0,0,0) will be array([arrays[0][0,0,0], arrays[1][0,0,0],... arrays[n][0,0,0]). The dimensions of the resulting Series will be *opposite* that of the passed numpy array. Their dtype will not be changed. # if passed a single array, cast it to a sequence of length 1 # check that shapes of passed arrays are consistent # get indices so that fastest index changes first # we do need to prepend a scheme here, b/c otherwise the Hadoop based readers # will adopt their default behavior and start looking on hdfs://. # this appears to already be a fully-qualified URI # this looks like a local path spec # check whether we look like an absolute or a relative path # need to make relative local paths absolute; our file scheme parsing isn't all that it could be. Loads Series data from text files. Parameters ---------- dataPath : string Specifies the file or files to be loaded. dataPath may be either a URI (with scheme specified) or a path on the local filesystem. If a path is passed (determined by the absence of a scheme component when attempting to parse as a URI), and it is not already a wildcard expression and does not end in <ext>, then it will be converted into a wildcard pattern by appending '/*.ext'. This conversion can be avoided by passing a "file://" URI. dtype: dtype or dtype specifier, default 'float64' # keytype, valuetype here violate camelCasing convention for consistence with JSON conf file format Collects parameters to use for binary series loading. Priority order is as follows: 1. parameters specified as keyword arguments; 2. parameters specified in a conf.json file on the local filesystem; 3. default parameters Returns ------- BinaryLoadParameters instance # filter dict to include only recognized field names: Throws ValueError if any of the field values in the passed namedtuple instance evaluate to False. Note this is okay only so long as zero is not a valid parameter value. Hmm. Load a Series object from a directory of binary files. Parameters ---------- dataPath : string URI or local filesystem path Specifies the directory or files to be loaded. May be formatted as a URI string with scheme (e.g. "file://", "s3n://", or "gs://"). If no scheme is present, will be interpreted as a path on the local filesystem. This path must be valid on all workers. Datafile may also refer to a single file, or to a range of files specified by a glob-style expression using a single wildcard character '*'. 
newDtype : dtype or dtype specifier or string 'smallfloat' or None, optional, default 'smallfloat' Numpy dtype of output series data. Most methods expect Series data to be floating-point. Input data will be cast to the requested `newdtype` if not None - see Data `astype()` method. casting : 'no'|'equiv'|'safe'|'same_kind'|'unsafe', optional, default 'safe' Casting method to pass on to numpy's `astype()` method; see numpy documentation for details. maxPartitionSize : str, optional, default = '32mb' Maximum size of partitions as Java-style memory, will indirectly control the number of partitions Create an RDD of <string blocklabel, (int k-tuple indices, array of datatype values)> Parameters ---------- dataPath: string URI or local filesystem path Specifies the directory or files to be loaded. May be formatted as a URI string with scheme (e.g. "file://", "s3n://" or "gs://"). If no scheme is present, will be interpreted as a path on the local filesystem. This path must be valid on all workers. Datafile may also refer to a single file, or to a range of files specified by a glob-style expression using a single wildcard character '*'. dims: tuple of positive int Dimensions of input image data, ordered with the fastest-changing dimension first. dtype: dtype or dtype specifier, optional, default 'int16' Numpy dtype of input stack data newDtype: floating-point dtype or dtype specifier or string 'smallfloat' or None, optional, default 'smallfloat' Numpy dtype of output series data. Series data must be floating-point. Input data will be cast to the requested `newdtype` - see numpy `astype()` method. casting: 'no'|'equiv'|'safe'|'same_kind'|'unsafe', optional, default 'safe' Casting method to pass on to numpy's `astype()` method; see numpy documentation for details. recursive: boolean, default False If true, will recursively descend directories rooted at dataPath, loading all files in the tree that have an extension matching 'ext'. Recursive loading is currently only implemented for local filesystems (not s3). Returns --------- pair of (RDD, ntimepoints) RDD: sequence of keys, values pairs (call using flatMap) RDD Key: tuple of int zero-based indicies of position within original image volume RDD Value: numpy array of datatype series of values at position across loaded image volumes ntimepoints: int number of time points in returned series, determined from number of stack files found at dataPath newDtype: string string representation of numpy data type of returned blocks # integer division # for 3D stacks, do calculations to ensure that # different planes appear in distinct files # all but last dimension # get the greatest number of blocks in a plane (up to as many as requested) that still divide the plane # evenly. This will always be at least one. # otherwise just round to make contents divide into nearly even blocks # copy size out from closure; will modify later: # get start position for this block # adjust if at end of file # loop over files, loading one block from each # dimensions are now linindex x time (images) # append subscript keys based on dimensions # zero-based # map over blocks # read first page of first file to get expected image size # keep reading pages until we reach the end of the file, in order to get number of planes: # get dimensions # get datatype # check PIL version to see whether it is actually pillow or indeed old PIL and choose # conversion function appropriately. See ImagesLoader.fromMultipageTif and common.pil_to_array # for more explanation. 
# use numpy's array() function # use our modified version of matplotlib's pil_to_array # intialize at one block per plane # keep dividing while cutting our size in half still leaves us bigger than the requested size # should end up no more than 2x blockSize. # integer division # make sure we're reading the plane fully # prevent bringing in self in closure: # keys will be planeidx, blockidx: # use multitif module to generate a fake, in-memory # one-page tif file. the advantage of this is that it # cuts way down on the many small reads that PIL/pillow # will make otherwise, which would be a problem for s3 # or Google Storage # read tif using PIL directly # dimensions are now linindex x time (images) # append subscript keys based on dimensions # zero-based # add plane index to end of keys # map over blocks Load a Series object directly from binary image stack files. Parameters ---------- dataPath: string Path to data files or directory, specified as either a local filesystem path or in a URI-like format, including scheme. A dataPath argument may include a single '*' wildcard character in the filename. dims: tuple of positive int Dimensions of input image data, ordered with the fastest-changing dimension first. ext: string, optional, default "stack" Extension required on data files to be loaded. blockSize: string formatted as e.g. "64M", "512k", "2G", or positive int. optional, default "150M" Requested size of Series partitions in bytes (or kilobytes, megabytes, gigabytes). dtype: dtype or dtype specifier, optional, default 'int16' Numpy dtype of input stack data newDtype: dtype or dtype specifier or string 'smallfloat' or None, optional, default 'smallfloat' Numpy dtype of output series data. Most methods expect Series data to be floating-point. Input data will be cast to the requested `newdtype` if not None - see Data `astype()` method. casting: 'no'|'equiv'|'safe'|'same_kind'|'unsafe', optional, default 'safe' Casting method to pass on to numpy's `astype()` method; see numpy documentation for details. startIdx, stopIdx: nonnegative int. optional. Indices of the first and last-plus-one data file to load, relative to the sorted filenames matching `dataPath` and `ext`. Interpreted according to python slice indexing conventions. recursive: boolean, default False If true, will recursively descend directories rooted at dataPath, loading all files in the tree that have an extension matching 'ext'. Recursive loading is currently only implemented for local filesystems (not s3). Load a Series object from multipage tiff files. Parameters ---------- dataPath: string Path to data files or directory, specified as either a local filesystem path or in a URI-like format, including scheme. A dataPath argument may include a single '*' wildcard character in the filename. ext: string, optional, default "tif" Extension required on data files to be loaded. blockSize: string formatted as e.g. "64M", "512k", "2G", or positive int. optional, default "150M" Requested size of Series partitions in bytes (or kilobytes, megabytes, gigabytes). newDtype: dtype or dtype specifier or string 'smallfloat' or None, optional, default 'smallfloat' Numpy dtype of output series data. Most methods expect Series data to be floating-point. Input data will be cast to the requested `newdtype` if not None - see Data `astype()` method. casting: 'no'|'equiv'|'safe'|'same_kind'|'unsafe', optional, default 'safe' Casting method to pass on to numpy's `astype()` method; see numpy documentation for details. startIdx, stopIdx: nonnegative int. 
optional. Indices of the first and last-plus-one data file to load, relative to the sorted filenames matching `dataPath` and `ext`. Interpreted according to python slice indexing conventions. recursive: boolean, default False If true, will recursively descend directories rooted at dataPath, loading all files in the tree that have an extension matching 'ext'. Recursive loading is currently only implemented for local filesystems (not s3). # prevent additional downstream checks for this path Write out data from binary image stack files in the Series data flat binary format. Parameters ---------- dataPath: string Path to data files or directory, specified as either a local filesystem path or in a URI-like format, including scheme. A dataPath argument may include a single '*' wildcard character in the filename. outputDirPath: string Path to a directory into which to write Series file output. An outputdir argument may be either a path on the local file system or a URI-like format, as in dataPath. dims: tuple of positive int Dimensions of input image data, ordered with the fastest-changing dimension first. ext: string, optional, default "stack" Extension required on data files to be loaded. blockSize: string formatted as e.g. "64M", "512k", "2G", or positive int. optional, default "150M" Requested size of Series partitions in bytes (or kilobytes, megabytes, gigabytes). dtype: dtype or dtype specifier, optional, default 'int16' Numpy dtype of input stack data newDtype: floating-point dtype or dtype specifier or string 'smallfloat' or None, optional, default None Numpy dtype of output series binary data. Input data will be cast to the requested `newdtype` if not None - see Data `astype()` method. casting: 'no'|'equiv'|'safe'|'same_kind'|'unsafe', optional, default 'safe' Casting method to pass on to numpy's `astype()` method; see numpy documentation for details. startIdx, stopIdx: nonnegative int. optional. Indices of the first and last-plus-one data file to load, relative to the sorted filenames matching `dataPath` and `ext`. Interpreted according to python slice indexing conventions. overwrite: boolean, optional, default False If true, the directory specified by outputdirpath will first be deleted, along with all its contents, if it already exists. If false, a ValueError will be thrown if outputdirpath is found to already exist. # prevent additional downstream checks for this path Write out data from multipage tif files in the Series data flat binary format. Parameters ---------- dataPath: string Path to data files or directory, specified as either a local filesystem path or in a URI-like format, including scheme. A dataPath argument may include a single '*' wildcard character in the filename. outputDirPpath: string Path to a directory into which to write Series file output. An outputdir argument may be either a path on the local file system or a URI-like format, as in dataPath. ext: string, optional, default "stack" Extension required on data files to be loaded. blockSize: string formatted as e.g. "64M", "512k", "2G", or positive int. optional, default "150M" Requested size of Series partitions in bytes (or kilobytes, megabytes, gigabytes). newDtype: floating-point dtype or dtype specifier or string 'smallfloat' or None, optional, default None Numpy dtype of output series binary data. Input data will be cast to the requested `newdtype` if not None - see Data `astype()` method. 
casting: 'no'|'equiv'|'safe'|'same_kind'|'unsafe', optional, default 'safe' Casting method to pass on to numpy's `astype()` method; see numpy documentation for details. startIdx, stopIdx: nonnegative int. optional. Indices of the first and last-plus-one data file to load, relative to the sorted filenames matching `dataPath` and `ext`. Interpreted according to python slice indexing conventions. overwrite: boolean, optional, default False If true, the directory specified by outputdirpath will first be deleted, along with all its contents, if it already exists. If false, a ValueError will be thrown if outputdirpath is found to already exist. # prevent additional downstream checks for this path Loads Series data stored in a Matlab .mat file. `datafile` must refer to a path visible to all workers, such as on NFS or similar mounted shared filesystem. Loads Series data stored in the numpy save() .npy format. `datafile` must refer to a path visible to all workers, such as on NFS or similar mounted shared filesystem. Returns a dict loaded from a json file. Looks for file named `conffile` in same directory as `dataPath` Returns {} if file not found Helper function to write out a conf.json file with required information to load Series binary data. # write configuration file # config JSON keys are lowercased "valuetype", "keytype", not valueType, keyType # touch "SUCCESS" file as final action | 2.508989 | 3 |
mxnet/local_forward.py | rai-project/onnx_examples | 0 | 10433 | # run local models given a path, default to './mxnet_models/'
import os
import argparse
import time
import mxnet as mx
import numpy as np
file_path = os.path.realpath(__file__)
dir_name = os.path.dirname(file_path)
os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0"
class cuda_profiler_start():
import numba.cuda as cuda
cuda.profile_start()
class cuda_profiler_stop():
import numba.cuda as cuda
cuda.profile_stop()
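# Added note: the two "profiler" classes above run their bodies at class-definition time (i.e. at
# import), so the later cuda_profiler_start()/cuda_profiler_stop() calls only instantiate empty
# classes; wrapping the numba.cuda calls in plain functions would defer them until actually called.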
def xprint(s):
pass
parser = argparse.ArgumentParser(
description='Predict ImageNet classes from a given image')
parser.add_argument('--model_name', type=str, required=False, default='resnet50_v1',
help='name of the model to use')
parser.add_argument('--batch_size', type=int, required=False, default=1,
help='batch size to use')
parser.add_argument('--input_dim', type=int, required=False, default=224,
help='input dimension')
parser.add_argument('--input_channels', type=int, required=False, default=3,
help='input channels')
parser.add_argument('--num_iterations', type=int, required=False, default=30,
help='number of iterations to run')
parser.add_argument('--num_warmup', type=int, required=False, default=5,
help='number of warmup iterations to run')
parser.add_argument('--model_idx', type=int, required=False, default=2,
help='model idx')
parser.add_argument('--profile', type=bool, required=False, default=False,
help='enable profiling')
opt = parser.parse_args()
model_name = opt.model_name
batch_size = opt.batch_size
input_dim = opt.input_dim
input_channels = opt.input_channels
num_iterations = opt.num_iterations
num_warmup = opt.num_warmup
model_idx = opt.model_idx
profile = opt.profile
ctx = mx.gpu() if len(mx.test_utils.list_gpus()) else mx.cpu()
sym, arg_params, aux_params = mx.model.load_checkpoint(
dir_name + '/mxnet_models/'+model_name, 0)
data_names = [
graph_input
for graph_input in sym.list_inputs()
if graph_input not in arg_params and graph_input not in aux_params
]
net = mx.mod.Module(
symbol=sym,
data_names=[data_names[0]],
context=ctx,
label_names=None,
)
input_shape = (batch_size, input_channels, input_dim, input_dim)
img = mx.random.uniform(
shape=input_shape, ctx=ctx)
net.bind(for_training=False, data_shapes=[
(data_names[0], input_shape)], label_shapes=net._label_shapes)
net.set_params(arg_params, aux_params, allow_missing=True)
def forward_once():
mx.nd.waitall()
start = time.time()
prob = net.predict(img)
mx.nd.waitall()
end = time.time() # stop timer
return end - start
for i in range(num_warmup):
forward_once()
res = []
if profile:
cuda_profiler_start()
for i in range(num_iterations):
t = forward_once()
res.append(t)
if profile:
cuda_profiler_stop()
res = np.multiply(res, 1000)
print("{},{},{},{},{},{}".format(model_idx+1, model_name, batch_size, np.min(res),
np.average(res), np.max(res)))
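# Added illustrative addition (not part of the original script): percentile latencies can be
# reported alongside min/mean/max using the numpy results already collected above.
print("p50/p95/p99 (ms): {:.3f}/{:.3f}/{:.3f}".format(
    np.percentile(res, 50), np.percentile(res, 95), np.percentile(res, 99)))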
| # run local models given a path, default to './mxnet_models/'
import os
import argparse
import time
import mxnet as mx
import numpy as np
file_path = os.path.realpath(__file__)
dir_name = os.path.dirname(file_path)
os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0"
class cuda_profiler_start():
import numba.cuda as cuda
cuda.profile_start()
class cuda_profiler_stop():
import numba.cuda as cuda
cuda.profile_stop()
def xprint(s):
pass
parser = argparse.ArgumentParser(
description='Predict ImageNet classes from a given image')
parser.add_argument('--model_name', type=str, required=False, default='resnet50_v1',
help='name of the model to use')
parser.add_argument('--batch_size', type=int, required=False, default=1,
help='batch size to use')
parser.add_argument('--input_dim', type=int, required=False, default=224,
help='input dimension')
parser.add_argument('--input_channels', type=int, required=False, default=3,
help='input channels')
parser.add_argument('--num_iterations', type=int, required=False, default=30,
help='number of iterations to run')
parser.add_argument('--num_warmup', type=int, required=False, default=5,
help='number of warmup iterations to run')
parser.add_argument('--model_idx', type=int, required=False, default=2,
help='model idx')
parser.add_argument('--profile', type=bool, required=False, default=False,
help='enable profiling')
opt = parser.parse_args()
model_name = opt.model_name
batch_size = opt.batch_size
input_dim = opt.input_dim
input_channels = opt.input_channels
num_iterations = opt.num_iterations
num_warmup = opt.num_warmup
model_idx = opt.model_idx
profile = opt.profile
ctx = mx.gpu() if len(mx.test_utils.list_gpus()) else mx.cpu()
sym, arg_params, aux_params = mx.model.load_checkpoint(
dir_name + '/mxnet_models/'+model_name, 0)
data_names = [
graph_input
for graph_input in sym.list_inputs()
if graph_input not in arg_params and graph_input not in aux_params
]
net = mx.mod.Module(
symbol=sym,
data_names=[data_names[0]],
context=ctx,
label_names=None,
)
input_shape = (batch_size, input_channels, input_dim, input_dim)
img = mx.random.uniform(
shape=input_shape, ctx=ctx)
net.bind(for_training=False, data_shapes=[
(data_names[0], input_shape)], label_shapes=net._label_shapes)
net.set_params(arg_params, aux_params, allow_missing=True)
def forward_once():
mx.nd.waitall()
start = time.time()
prob = net.predict(img)
mx.nd.waitall()
end = time.time() # stop timer
return end - start
for i in range(num_warmup):
forward_once()
res = []
if profile:
cuda_profiler_start()
for i in range(num_iterations):
t = forward_once()
res.append(t)
if profile:
cuda_profiler_stop()
res = np.multiply(res, 1000)
print("{},{},{},{},{},{}".format(model_idx+1, model_name, batch_size, np.min(res),
np.average(res), np.max(res)))
| en | 0.396913 | # run local models given a path, default to './mxnet_models/' # stop timer | 2.276259 | 2 |
tests/test_get_angles.py | Mopolino8/lammps-data-file | 13 | 10434 | from lammps_data.angles import get_angles
def test_separate_diatomic_molecules_should_have_no_angles():
bonds = [(0, 1), (2, 3)]
assert get_angles(bonds) == []
def test_molecule_with_two_bonds_should_have_one_angle():
bonds = [(0, 1), (1, 2)]
assert get_angles(bonds) == [(0, 1, 2)]
def test_different_order_of_bond_tuples_should_return_same_order_within_angle_tuples():
bonds = [(0, 1), (1, 2)]
assert get_angles(bonds) == [(0, 1, 2)]
bonds = [(1, 2), (0, 1)]
assert get_angles(bonds) == [(0, 1, 2)]
def test_different_order_of_bond_tuples_should_return_same_order_of_angle_tuples():
bonds = [(0, 1), (1, 2), (1, 3)]
assert get_angles(bonds) == [(0, 1, 2), (0, 1, 3), (2, 1, 3)]
bonds = [(1, 2), (0, 1), (1, 3)]
assert get_angles(bonds) == [(0, 1, 2), (0, 1, 3), (2, 1, 3)]
def test_tetrahedral_molecule_should_have_six_angles():
bonds = [(0, 1), (0, 2), (0, 3), (0, 4)]
assert get_angles(bonds) == [(1, 0, 2),
(1, 0, 3),
(1, 0, 4),
(2, 0, 3),
(2, 0, 4),
(3, 0, 4)]
| from lammps_data.angles import get_angles
def test_separate_diatomic_molecules_should_have_no_angles():
bonds = [(0, 1), (2, 3)]
assert get_angles(bonds) == []
def test_molecule_with_two_bonds_should_have_one_angle():
bonds = [(0, 1), (1, 2)]
assert get_angles(bonds) == [(0, 1, 2)]
def test_different_order_of_bond_tuples_should_return_same_order_within_angle_tuples():
bonds = [(0, 1), (1, 2)]
assert get_angles(bonds) == [(0, 1, 2)]
bonds = [(1, 2), (0, 1)]
assert get_angles(bonds) == [(0, 1, 2)]
def test_different_order_of_bond_tuples_should_return_same_order_of_angle_tuples():
bonds = [(0, 1), (1, 2), (1, 3)]
assert get_angles(bonds) == [(0, 1, 2), (0, 1, 3), (2, 1, 3)]
bonds = [(1, 2), (0, 1), (1, 3)]
assert get_angles(bonds) == [(0, 1, 2), (0, 1, 3), (2, 1, 3)]
def test_tetrahedral_molecule_should_have_six_angles():
bonds = [(0, 1), (0, 2), (0, 3), (0, 4)]
assert get_angles(bonds) == [(1, 0, 2),
(1, 0, 3),
(1, 0, 4),
(2, 0, 3),
(2, 0, 4),
(3, 0, 4)] | none | 1 | 2.678447 | 3 |
|
api/scheduler/migrations/0001_initial.py | jfaach/stock-app | 0 | 10435 | # Generated by Django 3.1.1 on 2020-12-16 03:07
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Scheduler',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('minutes', models.IntegerField(default=15)),
],
),
]
| # Generated by Django 3.1.1 on 2020-12-16 03:07
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Scheduler',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('minutes', models.IntegerField(default=15)),
],
),
]
| en | 0.818878 | # Generated by Django 3.1.1 on 2020-12-16 03:07 | 1.730323 | 2 |
9-Wine-Scaling.py | Pawel762/Class-7_homework | 0 | 10436 | from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_wine
from sklearn.model_selection import train_test_split
wine = load_wine()
columns_names = wine.feature_names
y = wine.target
X = wine.data
print('Pre scaling X')
print(X)
scaler = StandardScaler()
scaler.fit(X)
scaled_features = scaler.transform(X)
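# Added note: StandardScaler standardizes each feature to zero mean and unit variance using the
# statistics learned by fit(); scaler.fit_transform(X) would combine the two steps above.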
print('Post scaling X')
print(scaled_features)
X_train, X_test, y_train, y_test = train_test_split(scaled_features, y, test_size=0.375)
| from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_wine
from sklearn.model_selection import train_test_split
wine = load_wine()
columns_names = wine.feature_names
y = wine.target
X = wine.data
print('Pre scaling X')
print(X)
scaler = StandardScaler()
scaler.fit(X)
scaled_features = scaler.transform(X)
print('Post scaling X')
print(scaled_features)
X_train, X_test, y_train, y_test = train_test_split(scaled_features, y, test_size=0.375)
| none | 1 | 2.952666 | 3 |
|
tests/conftest.py | szkkteam/flask-starter | 0 | 10437 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Common Python library imports
import os
import pytest
# Pip package imports
from collections import namedtuple
from flask import template_rendered
from flask_security.signals import (
reset_password_instructions_sent,
user_confirmed,
user_registered,
)
# Internal package imports
from backend.app import _create_app
from backend.config import TestConfig
from backend.extensions import db as db_ext
from backend.extensions.mail import mail
from ._client import (
ApiTestClient,
ApiTestResponse,
HtmlTestClient,
HtmlTestResponse,
)
from ._model_factory import ModelFactory
@pytest.fixture(autouse=True, scope='session')
def app():
app = _create_app(TestConfig)
#ctx = app.app_context()
ctx = app.test_request_context()
ctx.push()
yield app
ctx.pop()
@pytest.yield_fixture
def client(app):
app.response_class = HtmlTestResponse
app.test_client_class = HtmlTestClient
with app.test_client() as client:
yield client
@pytest.yield_fixture
def api_client(app):
app.response_class = ApiTestResponse
app.test_client_class = ApiTestClient
with app.test_client() as client:
yield client
@pytest.fixture(autouse=True, scope='session')
def db():
db_ext.create_all()
yield db_ext
db_ext.drop_all()
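# Added note: the db_session fixture below wraps each test in its own connection-level transaction
# and rolls it back in the finally block, so database changes made by one test never leak into the
# next while still sharing the single session-scoped schema created above.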
@pytest.fixture(autouse=True)
def db_session(db):
connection = db.engine.connect()
transaction = connection.begin()
session = db.create_scoped_session(options=dict(bind=connection, binds={}))
db.session = session
try:
yield session
finally:
transaction.rollback()
connection.close()
session.remove()
@pytest.fixture(scope='session')
def celery_config():
return {'broker_url': 'redis://localhost:6379/1',
'result_backend': 'redis://localhost:6379/1',
'accept_content': ('json', 'pickle')}
@pytest.fixture()
def templates(app):
records = []
RenderedTemplate = namedtuple('RenderedTemplate', 'template context')
def record(sender, template, context, **extra):
records.append(RenderedTemplate(template, context))
template_rendered.connect(record, app)
try:
yield records
finally:
template_rendered.disconnect(record, app)
@pytest.fixture()
def outbox():
with mail.record_messages() as messages:
yield messages
@pytest.fixture()
def registrations(app):
records = []
def record(sender, *args, **kwargs):
records.append(kwargs)
user_registered.connect(record, app)
try:
yield records
finally:
user_registered.disconnect(record, app)
@pytest.fixture()
def confirmations(app):
records = []
def record(sender, *args, **kwargs):
records.append(kwargs['user'])
print("Record: ", records[-1])
user_confirmed.connect(record, app)
try:
yield records
finally:
print("Disconnect record: ", records)
user_confirmed.disconnect(record, app)
@pytest.fixture()
def password_resets(app):
records = []
def record(sender, *args, **kwargs):
records.append(kwargs)
reset_password_instructions_sent.connect(record, app)
try:
yield records
finally:
reset_password_instructions_sent.disconnect(record, app)
@pytest.fixture()
def user(model_factory):
yield model_factory.create('User', 'user')
@pytest.fixture()
def newslettersubscribe(model_factory):
yield model_factory.create('NewsletterSubscribe', 'newslettersubscribe')
@pytest.fixture()
def admin(model_factory):
yield model_factory.create('User', 'admin')
@pytest.fixture()
def models(request, model_factory):
mark = request.param
if mark is not None:
return model_factory.get_models(mark)
@pytest.fixture()
def model_factory(app, db_session):
fixtures_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'model_fixtures')
yield ModelFactory(db_session, app.models, fixtures_dir)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Common Python library imports
import os
import pytest
# Pip package imports
from collections import namedtuple
from flask import template_rendered
from flask_security.signals import (
reset_password_instructions_sent,
user_confirmed,
user_registered,
)
# Internal package imports
from backend.app import _create_app
from backend.config import TestConfig
from backend.extensions import db as db_ext
from backend.extensions.mail import mail
from ._client import (
ApiTestClient,
ApiTestResponse,
HtmlTestClient,
HtmlTestResponse,
)
from ._model_factory import ModelFactory
@pytest.fixture(autouse=True, scope='session')
def app():
app = _create_app(TestConfig)
#ctx = app.app_context()
ctx = app.test_request_context()
ctx.push()
yield app
ctx.pop()
@pytest.yield_fixture
def client(app):
app.response_class = HtmlTestResponse
app.test_client_class = HtmlTestClient
with app.test_client() as client:
yield client
@pytest.yield_fixture
def api_client(app):
app.response_class = ApiTestResponse
app.test_client_class = ApiTestClient
with app.test_client() as client:
yield client
@pytest.fixture(autouse=True, scope='session')
def db():
db_ext.create_all()
yield db_ext
db_ext.drop_all()
@pytest.fixture(autouse=True)
def db_session(db):
connection = db.engine.connect()
transaction = connection.begin()
session = db.create_scoped_session(options=dict(bind=connection, binds={}))
db.session = session
try:
yield session
finally:
transaction.rollback()
connection.close()
session.remove()
@pytest.fixture(scope='session')
def celery_config():
return {'broker_url': 'redis://localhost:6379/1',
'result_backend': 'redis://localhost:6379/1',
'accept_content': ('json', 'pickle')}
@pytest.fixture()
def templates(app):
records = []
RenderedTemplate = namedtuple('RenderedTemplate', 'template context')
def record(sender, template, context, **extra):
records.append(RenderedTemplate(template, context))
template_rendered.connect(record, app)
try:
yield records
finally:
template_rendered.disconnect(record, app)
@pytest.fixture()
def outbox():
with mail.record_messages() as messages:
yield messages
@pytest.fixture()
def registrations(app):
records = []
def record(sender, *args, **kwargs):
records.append(kwargs)
user_registered.connect(record, app)
try:
yield records
finally:
user_registered.disconnect(record, app)
@pytest.fixture()
def confirmations(app):
records = []
def record(sender, *args, **kwargs):
records.append(kwargs['user'])
print("Record: ", records[-1])
user_confirmed.connect(record, app)
try:
yield records
finally:
print("Disconnect record: ", records)
user_confirmed.disconnect(record, app)
@pytest.fixture()
def password_resets(app):
records = []
def record(sender, *args, **kwargs):
records.append(kwargs)
reset_password_instructions_sent.connect(record, app)
try:
yield records
finally:
reset_password_instructions_sent.disconnect(record, app)
@pytest.fixture()
def user(model_factory):
yield model_factory.create('User', 'user')
@pytest.fixture()
def newslettersubscribe(model_factory):
yield model_factory.create('NewsletterSubscribe', 'newslettersubscribe')
@pytest.fixture()
def admin(model_factory):
yield model_factory.create('User', 'admin')
@pytest.fixture()
def models(request, model_factory):
mark = request.param
if mark is not None:
return model_factory.get_models(mark)
@pytest.fixture()
def model_factory(app, db_session):
fixtures_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'model_fixtures')
yield ModelFactory(db_session, app.models, fixtures_dir) | en | 0.463209 | #!/usr/bin/env python # -*- coding: utf-8 -*- # Common Python library imports # Pip package imports # Internal package imports #ctx = app.app_context() | 1.88447 | 2 |
setup.py | YiuRULE/nats.py | 0 | 10438 | from setuptools import setup
from nats.aio.client import __version__
EXTRAS = {
'nkeys': ['nkeys'],
}
setup(
name='nats-py',
version=__version__,
description='NATS client for Python',
long_description='Python client for NATS, a lightweight, high-performance cloud native messaging system',
classifiers=[
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10'
],
url='https://github.com/nats-io/nats.py',
author='<NAME>',
author_email='<EMAIL>',
license='Apache 2 License',
packages=['nats', 'nats.aio', 'nats.protocol', 'nats.js'],
zip_safe=True,
extras_require=EXTRAS
)
| from setuptools import setup
from nats.aio.client import __version__
EXTRAS = {
'nkeys': ['nkeys'],
}
setup(
name='nats-py',
version=__version__,
description='NATS client for Python',
long_description='Python client for NATS, a lightweight, high-performance cloud native messaging system',
classifiers=[
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10'
],
url='https://github.com/nats-io/nats.py',
author='<NAME>',
author_email='<EMAIL>',
license='Apache 2 License',
packages=['nats', 'nats.aio', 'nats.protocol', 'nats.js'],
zip_safe=True,
extras_require=EXTRAS
) | none | 1 | 1.238945 | 1 |
|
example_python_files/MagicDAQ,MABoard,FullDemo.py | MagicDAQ/magicdaq_docs | 1 | 10439 | ##############################################################
#*** MagicDAQ USB DAQ and M&A Board General Demo Script ***
##############################################################
#*** Websites ***
# MagicDAQ Website:
# https://www.magicdaq.com/
# API Docs Website:
# https://magicdaq.github.io/magicdaq_docs/
#*** Install MagicDAQ ***
# Download the MagicDAQ python package from pypi
# Run this command in a command prompt:
# python -m pip install magicdaq
# Further docs: https://magicdaq.github.io/magicdaq_docs/#/Install_MagicDAQ
# MagicDAQ is only compatible with Python 3 on Windows. It does not work on Linux at the moment. It does not work with Python 2.
#*** Using Auto Code Complete With PyCharm ***
# Using a code editor like Pycharm and want to get auto complete working for the MagicDAQ package?
# Docs: https://magicdaq.github.io/magicdaq_docs/#/PyCharmCodeCompletion
##############################################################
#*** Imports ***
##############################################################
import sys
import time
# Import MagicDAQ
print('*** MagicDAQ Install Check ***')
print('')
try:
# Import MagicDAQDevice object
from magicdaq.api_class import MagicDAQDevice
# Create daq_one object
daq_one = MagicDAQDevice()
print('GOOD: MagicDAQ API is installed properly.')
# Get MagicDAQ Driver Version
driver_version = daq_one.get_driver_version()
if driver_version == 1.0:
print('GOOD: MagicDAQ Driver is installed properly.')
print('You are ready to use MagicDAQ!')
else:
print('ERROR: MagicDAQ Driver version not expected value: '+str(driver_version))
print('Try installing MagicDAQ using pip again.')
print('https://magicdaq.github.io/magicdaq_docs/#/Install_MagicDAQ')
print('Feel free to email MagicDAQ Support at: <EMAIL>')
except Exception as exception_text:
print('Original exception: ')
print(exception_text)
print('')
print('ERROR: Unable to import MagicDAQ API.')
print('Mostly likely, MagicDAQ has not been properly downloaded and installed using pip.')
print('Please consult MagicDAQ API Docs: https://magicdaq.github.io/magicdaq_docs/#/Install_MagicDAQ')
print('Feel free to email MagicDAQ Support at: <EMAIL>')
sys.exit(0)
##############################################################
#*** MagicDAQ USB DAQ MDAQ300 Features Demo ***
##############################################################
# This portion of the script shows off some of the USB DAQ's features
# Hardware docs: https://www.magicdaq.com/product/magic-daq/
print('')
print('*** MagicDAQ USB DAQ Demo ***')
print('Ensure the USB DAQ is plugged into the computer using the USB cable.')
print('The DAQ does not need to be connected to the M&A board.')
print('')
user_input = input('Press any key to continue.')
#*** Open DAQ Device ***
# Remember, the daq_one object has already been created in the above 'Imports' section
# We must open the daq device before performing any hardware feature manipulation
# https://magicdaq.github.io/magicdaq_docs/#/MagicDAQ_Basics
daq_one.open_daq_device()
###############################################################
#*** Analog Output Demo: Constant, Sine, and PWM on AO1 Pin ***
###############################################################
print('')
print('--- Analog Output Demo: Constant, Sine, and PWM Output ---')
# Set constant 3 volt output voltage on AO1 pin
daq_one.set_analog_output(1,3)
print('Using an oscilloscope, place the scope probe on pin AO1 and connect the scope probe GND to one of the USB DAQs AGND pins')
print('You should now observe a constant 3V')
print('')
user_input = input('Press any key to continue.')
# Configure and start 300Hz sine wave with 2V amplitude on AO1 pin
daq_one.configure_analog_output_sine_wave(1,300,amplitude=2)
daq_one.start_analog_output_wave(1)
print('You should now observe a 300Hz sine wave with 2V amplitude.')
print('')
user_input = input('Press any key to continue.')
# Stop previous wave
daq_one.stop_analog_output_wave(1)
# Configure and start PWM wave, 200 Hz, 50% duty cycle, 3.3V amplitude
daq_one.configure_analog_output_pwm_wave(1,200,50,amplitude=3.3)
daq_one.start_analog_output_wave(1)
print('You should now observe a 200Hz PWM wave, 50% duty cycle, with 3.3V amplitude.')
print('')
user_input = input('Press any key to continue.')
# Stop the wave
daq_one.stop_analog_output_wave(1)
print('The wave should now stop. You could set it to GND using set_analog_output() if you wanted.')
print('')
user_input = input('Press any key to continue.')
###############################################################
#*** Pulse Counter Pin Demo: PWM waves ***
###############################################################
print('')
print('--- Pulse Counter Pin Demo: PWM Waves ---')
# Configure a 50 KHz frequency, 75% duty cycle, continuous PWM Wave on the counter pin (CTR0)
# Note that unlike the analog output pins, the CTR0 pin always outputs at an amplitude of 3.3v when producing PWM waves
daq_one.configure_counter_pwm(50000,75)
# Start counter wave
daq_one.start_counter_pwm()
print('Place your scope probe on pin CTR0')
print('You should see a 50kHz, 75% duty cycle PWM wave.')
print('')
user_input = input('Press any key to continue.')
# Now stopping the counter PWM wave
daq_one.stop_counter_pwm()
print('The PWM wave will now stop.')
print('')
user_input = input('Press any key to continue.')
###############################################################
#*** Pulse Counter Pin Demo: Pulse Counting ***
###############################################################
print('')
print('--- Pulse Counter Pin Demo: Pulse Counting ---')
print('Use a piece of wire to bridge CTR0 to DGND several times')
print('CTR0 has an internal pull up resistor. You are simulating a pulse pulling the voltage to GND.')
print('You will have 8 sec to simulate some pulses.')
print('')
user_input = input('Press any key when you are ready to start.')
# Start the Pulse Counter
# Pulses will be counted on the falling edge
daq_one.enable_pulse_counter()
# Sleep for 8 sec
time.sleep(8)
# Read number of pulses
print('Number of pulses counted: '+str(daq_one.read_pulse_counter()))
print('You are using a piece of wire, so it is likely bouncing on and off the screw terminal, counting many pulses')
print('')
user_input = input('Stop simulating pulses. Press any key to continue.')
print('')
print('Now clearing the pulse counter')
daq_one.clear_pulse_counter()
print('Pulse count after clearing: '+str(daq_one.read_pulse_counter()))
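# --- Illustrative sketch (not part of the original demo) ---
# One way to turn a pulse count into an approximate rate; the 8 second window
# matches the sleep used above and is an assumption of this sketch.
# pulses = daq_one.read_pulse_counter()
# approx_hz = pulses / 8.0 # average pulses per second over the window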
###############################################################
#*** Digital Pin Demo ***
###############################################################
print('')
print('--- Digital Pin Demo ---')
# Set P0.0 pin LOW
daq_one.set_digital_output(0,0)
print('Place scope probe on pin P0.0, pin should be LOW')
print('')
user_input = input('Press any key to continue.')
# Set P0.0 pin HIGH
daq_one.set_digital_output(0,1)
print('Place scope probe on pin P0.0, pin should be HIGH')
print('')
user_input = input('Press any key to continue.')
###############################################################
#*** Analog Input Pin Demo ***
###############################################################
print('')
print('--- Analog Input Pin Demo ---')
# Single ended voltage measurement
print('Apply voltage to AI0 pin. If you do not have a power supply handy, you can run a wire from the +5V pin to the AI0 pin.')
print('')
user_input = input('Press any key to continue.')
print('Voltage measured at AI0: '+str(daq_one.read_analog_input(0)))
print('If you are using the +5V pin, remember that this voltage is derived from the USB power supply, so it will be whatever your USB bus is producing, probably something slightly less than 5V.')
# If you want to perform a differential input measurement
# daq_one.read_diff_analog_input()
# https://magicdaq.github.io/magicdaq_docs/#/read_diff_analog_input
###############################################################
#*** M&A Board Demo ***
###############################################################
# M&A Board hardware spec:
# https://www.magicdaq.com/product/ma-board-full-kit/
print('')
print('*** M&A Board Demo ***')
print('Ensure the USB DAQ is connected to the M&A board using the ribbon cable.')
print('Ribbon cable pin out on page 6 of: ')
print('https://www.magicdaq.com/mdaq350datasheet/')
print('Use the provided power cable to apply power to the M&A board.')
print('')
user_input = input('Press any key to continue.')
###############################################################
#*** Relay Demo ***
###############################################################
print('')
print('--- Relay Demo ---')
print('Setting all relays to closed.')
daq_one.set_digital_output(7, 1)
daq_one.set_digital_output(6, 1)
daq_one.set_digital_output(5, 1)
daq_one.set_digital_output(4, 1)
time.sleep(1)
relay_count = 1
digital_pin_count = 7
while relay_count <= 4:
print('Relay #: ' + str(relay_count) + ' Digital Pin #: ' + str(digital_pin_count))
# Set relay to open
print('Setting relay to OPEN.')
daq_one.set_digital_output(digital_pin_count, 0)
time.sleep(1)
# Increment counters
relay_count += 1
digital_pin_count -= 1
print('')
print('')
user_input = input('Press any key to continue.')
###############################################################
#*** Vout Demo ***
###############################################################
print('')
print('--- Vout Demo ---')
print('Vout provides a variable voltage power output capable of up to 2A')
print('By characterizing your M&A board, or by building a feedback loop, the voltage accuracy of Vout can be made quite good.')
print('See notes on page 4 of the M&A data sheet.')
print('https://www.magicdaq.com/mdaq350datasheet/')
# See the M&A board data sheet for the equation that describes the Vout to Vout_set (0 and 2.77 here) relationship
print('')
print('Vout_set Set to 0V.')
print('Measure Vout with a multimeter. It should be about 10V')
daq_one.set_analog_output(0, 0)
print('')
user_input = input('Press any key to continue.')
print('Vout_set Set to 2.77V')
print('Measure Vout with a multimeter. It should be about 5V')
daq_one.set_analog_output(0, 2.77)
print('')
user_input = input('Press any key to continue.')
###############################################################
#*** Low Current Measurement Demo: A1 ***
###############################################################
print('')
print('--- A1 Low Current Measurement Demo ---')
print('Use the 3.3V board voltage and a 20K resistor to put 165uA through A1.')
print('')
user_input = input('Press any key to continue.')
# See the M&A board data sheet for the equation that describes the Vout to current relationship
pin_4_voltage = daq_one.read_analog_input(4)
print('Read voltage: ' + str(pin_4_voltage))
calculated_current_amps = pin_4_voltage / (332 * 97.863)
ua_current = round((calculated_current_amps / .000001), 3)
print('Calculated uA current: ' + str(ua_current))
###############################################################
#*** Current Measurement Demo: A2 ***
###############################################################
print('')
print('--- A2 Current Measurement Demo (+/- 5A max) ---')
print('Use an external 5V power supply and 5 ohm power resistor to put 1 Amp through A2.')
print('')
user_input = input('Press any key to continue.')
# See the M&A board data sheet for the equation that describes the Vout to current relationship
pin_5_voltage = daq_one.read_analog_input(5)
print('Read voltage: ' + str(pin_5_voltage))
calculated_current_amps = pin_5_voltage / (.01 * 200)
# ma_current = round((calculated_current_amps / .001), 3)
print('Calculated A current: ' + str(calculated_current_amps))
###############################################################
#*** Current Measurement Demo: A3 ***
###############################################################
print('')
print('--- A3 Current Measurement Demo (+/- 1.5A max) ---')
print('Use an external 5V power supply and 5 ohm power resistor to put 1 Amp through A3.')
print('')
user_input = input('Press any key to continue.')
# See the M&A board data sheet for the equation that describes the Vout to current relationship
pin_6_voltage = daq_one.read_analog_input(6)
print('Read voltage: ' + str(pin_6_voltage))
calculated_current_amps = pin_6_voltage / (.033 * 200)
ma_current = round((calculated_current_amps / .001), 3)
print('Calculated mA current: ' + str(ma_current))
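###############################################################
#*** Illustrative helper (added sketch, not part of the original demo) ***
###############################################################
# A minimal sketch that wraps the three voltage-to-current conversions used
# above into one function. The gain/shunt constants are copied from the
# calculations in this script; check them against the M&A board data sheet.
def voltage_to_current_amps(channel, voltage):
    # channel: 'A1' (low current), 'A2' (+/- 5A), or 'A3' (+/- 1.5A)
    if channel == 'A1':
        return voltage / (332 * 97.863)
    if channel == 'A2':
        return voltage / (.01 * 200)
    if channel == 'A3':
        return voltage / (.033 * 200)
    raise ValueError('Unknown channel: ' + str(channel))
# Example: voltage_to_current_amps('A3', pin_6_voltage) reproduces the A3 value above.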
###############################################################
#*** Demo Complete. ***
###############################################################
# Close connection to daq
daq_one.close_daq_device()
| ##############################################################
#*** MagicDAQ USB DAQ and M&A Board General Demo Script ***
##############################################################
#*** Websites ***
# MagicDAQ Website:
# https://www.magicdaq.com/
# API Docs Website:
# https://magicdaq.github.io/magicdaq_docs/
#*** Install MagicDAQ ***
# Download the MagicDAQ python package from pypi
# Run this command in a command prompt:
# python -m pip install magicdaq
# Further docs: https://magicdaq.github.io/magicdaq_docs/#/Install_MagicDAQ
# MagicDAQ is only compatible with Python 3 on Windows. It does not work on Linux at the moment. It does not work with Python 2.
#*** Using Auto Code Complete With PyCharm ***
# Using a code editor like Pycharm and want to get auto complete working for the MagicDAQ package?
# Docs: https://magicdaq.github.io/magicdaq_docs/#/PyCharmCodeCompletion
##############################################################
#*** Imports ***
##############################################################
import sys
import time
# Import MagicDAQ
print('*** MagicDAQ Install Check ***')
print('')
try:
# Import MagicDAQDevice object
from magicdaq.api_class import MagicDAQDevice
# Create daq_one object
daq_one = MagicDAQDevice()
print('GOOD: MagicDAQ API is installed properly.')
# Get MagicDAQ Driver Version
driver_version = daq_one.get_driver_version()
if driver_version == 1.0:
print('GOOD: MagicDAQ Driver is installed properly.')
print('You are ready to use MagicDAQ!')
else:
print('ERROR: MagicDAQ Driver version not expected value: '+str(driver_version))
print('Try installing MagicDAQ using pip again.')
print('https://magicdaq.github.io/magicdaq_docs/#/Install_MagicDAQ')
print('Feel free to email MagicDAQ Support at: <EMAIL>')
except Exception as exception_text:
print('Original exception: ')
print(exception_text)
print('')
print('ERROR: Unable to import MagicDAQ API.')
print('Mostly likely, MagicDAQ has not been properly downloaded and installed using pip.')
print('Please consult MagicDAQ API Docs: https://magicdaq.github.io/magicdaq_docs/#/Install_MagicDAQ')
print('Feel free to email MagicDAQ Support at: <EMAIL>')
sys.exit(0)
##############################################################
#*** MagicDAQ USB DAQ MDAQ300 Features Demo ***
##############################################################
# This portion of the script shows off some of the USB DAQ's features
# Hardware docs: https://www.magicdaq.com/product/magic-daq/
print('')
print('*** MagicDAQ USB DAQ Demo ***')
print('Ensure the USB DAQ is plugged into the computer using the USB cable.')
print('The DAQ does not need to be connected to the M&A board.')
print('')
user_input = input('Press any key to continue.')
#*** Open DAQ Device ***
# Remember, the daq_one object has already been created in the above 'Imports' section
# We must open the daq device before performing any hardware feature manipulation
# https://magicdaq.github.io/magicdaq_docs/#/MagicDAQ_Basics
daq_one.open_daq_device()
###############################################################
#*** Analog Output Demo: Constant, Sine, and PWM on AO1 Pin ***
###############################################################
print('')
print('--- Analog Output Demo: Constant, Sine, and PWM Output ---')
# Set constant 3 volt output voltage on AO1 pin
daq_one.set_analog_output(1,3)
print('Using an oscilloscope, place the scope probe on pin AO1 and connect the scope probe GND to one of the USB DAQs AGND pins')
print('You should now observe a constant 3V')
print('')
user_input = input('Press any key to continue.')
# Configure and start 300Hz sine wave with 2V amplitude on AO1 pin
daq_one.configure_analog_output_sine_wave(1,300,amplitude=2)
daq_one.start_analog_output_wave(1)
print('You should now observe a 300Hz sine wave with 2V amplitude.')
print('')
user_input = input('Press any key to continue.')
# Stop previous wave
daq_one.stop_analog_output_wave(1)
# Configure and start PWM wave, 200 Hz, 50% duty cycle, 3.3V amplitude
daq_one.configure_analog_output_pwm_wave(1,200,50,amplitude=3.3)
daq_one.start_analog_output_wave(1)
print('You should now observe a 200Hz PWM wave, 50% duty cycle, with 3.3V amplitude.')
print('')
user_input = input('Press any key to continue.')
# Stop the wave
daq_one.stop_analog_output_wave(1)
print('The wave should now stop. You could set it to GND using set_analog_output() if you wanted.')
print('')
user_input = input('Press any key to continue.')
###############################################################
#*** Pulse Counter Pin Demo: PWM waves ***
###############################################################
print('')
print('--- Pulse Counter Pin Demo: PWM Waves ---')
# Configure a 50 KHz frequency, 75% duty cycle, continuous PWM Wave on the counter pin (CTR0)
# Note that unlike the analog output pins, the CTR0 pin always outputs at an amplitude of 3.3v when producing PWM waves
daq_one.configure_counter_pwm(50000,75)
# Start counter wave
daq_one.start_counter_pwm()
print('Place your scope probe on pin CTR0')
print('You should see a 50kHz, 75% duty cycle PWM wave.')
print('')
user_input = input('Press any key to continue.')
# Now stopping the counter PWM wave
daq_one.stop_counter_pwm()
print('The PWM wave will now stop.')
print('')
user_input = input('Press any key to continue.')
###############################################################
#*** Pulse Counter Pin Demo: Pulse Counting ***
###############################################################
print('')
print('--- Pulse Counter Pin Demo: Pulse Counting ---')
print('Use a piece of wire to bridge CTR0 to DGND several times')
print('CTR0 has an internal pull up resistor. You are simulating a pulse pulling the voltage to GND.')
print('You will have 8 sec to simulate some pulses.')
print('')
user_input = input('Press any key when you are ready to start.')
# Start the Pulse Counter
# Pulses will be counted on the falling edge
daq_one.enable_pulse_counter()
# Sleep for 8 sec
time.sleep(8)
# Read number of pulses
print('Number of pulses counted: '+str(daq_one.read_pulse_counter()))
print('You are using a piece of wire, so it is likely bouncing on and off the screw terminal, counting many pulses')
print('')
user_input = input('Stop simulating pulses. Press any key to continue.')
print('')
print('Now clearing the pulse counter')
daq_one.clear_pulse_counter()
print('Pulse count after clearing: '+str(daq_one.read_pulse_counter()))
###############################################################
#*** Digital Pin Demo ***
###############################################################
print('')
print('--- Digital Pin Demo ---')
# Set P0.0 pin LOW
daq_one.set_digital_output(0,0)
print('Place scope probe on pin P0.0, pin should be LOW')
print('')
user_input = input('Press any key to continue.')
# Set P0.0 pin HIGH
daq_one.set_digital_output(0,1)
print('Place scope probe on pin P0.0, pin should be HIGH')
print('')
user_input = input('Press any key to continue.')
###############################################################
#*** Analog Input Pin Demo ***
###############################################################
print('')
print('--- Analog Input Pin Demo ---')
# Single ended voltage measurement
print('Apply voltage to AI0 pin. If you do not have a power supply handy, you can run a wire from the +5V pin to the AI0 pin.')
print('')
user_input = input('Press any key to continue.')
print('Voltage measured at AI0: '+str(daq_one.read_analog_input(0)))
print('If you are using the +5V pin, remember that this voltage is derived from the USB power supply, so it will be whatever your USB bus is producing, probably something slightly less than 5V.')
# If you want to perform a differential input measurement
# daq_one.read_diff_analog_input()
# https://magicdaq.github.io/magicdaq_docs/#/read_diff_analog_input
###############################################################
#*** M&A Board Demo ***
###############################################################
# M&A Board hardware spec:
# https://www.magicdaq.com/product/ma-board-full-kit/
print('')
print('*** M&A Board Demo ***')
print('Ensure the USB DAQ is connected to the M&A board using the ribbon cable.')
print('Ribbon cable pin out on page 6 of: ')
print('https://www.magicdaq.com/mdaq350datasheet/')
print('Use the provided power cable to apply power to the M&A board.')
print('')
user_input = input('Press any key to continue.')
###############################################################
#*** Relay Demo ***
###############################################################
print('')
print('--- Relay Demo ---')
print('Setting all relays to closed.')
daq_one.set_digital_output(7, 1)
daq_one.set_digital_output(6, 1)
daq_one.set_digital_output(5, 1)
daq_one.set_digital_output(4, 1)
time.sleep(1)
relay_count = 1
digital_pin_count = 7
while relay_count <= 4:
print('Relay #: ' + str(relay_count) + ' Digital Pin #: ' + str(digital_pin_count))
# Set relay to open
print('Setting relay to OPEN.')
daq_one.set_digital_output(digital_pin_count, 0)
time.sleep(1)
# Increment counters
relay_count += 1
digital_pin_count -= 1
print('')
print('')
user_input = input('Press any key to continue.')
###############################################################
#*** Vout Demo ***
###############################################################
print('')
print('--- Vout Demo ---')
print('Vout provides a variable voltage power output capable of up to 2A')
print('By characterizing your M&A board, or by building a feedback loop, the voltage accuracy of Vout can be made quite good.')
print('See notes on page 4 of the M&A data sheet.')
print('https://www.magicdaq.com/mdaq350datasheet/')
# See the M&A board data sheet for the equation that describes the Vout to Vout_set (0 and 2.77 here) relationship
print('')
print('Vout_set Set to 0V.')
print('Measure Vout with a multimeter. It should be about 10V')
daq_one.set_analog_output(0, 0)
print('')
user_input = input('Press any key to continue.')
print('Vout_set Set to 2.77V')
print('Measure Vout with a multimeter. It should be about 5V')
daq_one.set_analog_output(0, 2.77)
print('')
user_input = input('Press any key to continue.')
###############################################################
#*** Low Current Measurement Demo: A1 ***
###############################################################
print('')
print('--- A1 Low Current Measurement Demo ---')
print('Use the 3.3V board voltage and a 20K resistor to put 165uA through A1.')
print('')
user_input = input('Press any key to continue.')
# See the M&A board data sheet for the equation that describes the Vout to current relationship
pin_4_voltage = daq_one.read_analog_input(4)
print('Read voltage: ' + str(pin_4_voltage))
calculated_current_amps = pin_4_voltage / (332 * 97.863)
ua_current = round((calculated_current_amps / .000001), 3)
print('Calculated uA current: ' + str(ua_current))
###############################################################
#*** Current Measurement Demo: A2 ***
###############################################################
print('')
print('--- A2 Current Measurement Demo (+/- 5A max) ---')
print('Use an external 5V power supply and 5 ohm power resistor to put 1 Amp through A2.')
print('')
user_input = input('Press any key to continue.')
# See the M&A board data sheet for the equation that describes the Vout to current relationship
pin_5_voltage = daq_one.read_analog_input(5)
print('Read voltage: ' + str(pin_5_voltage))
calculated_current_amps = pin_5_voltage / (.01 * 200)
# ma_current = round((calculated_current_amps / .001), 3)
print('Calculated A current: ' + str(calculated_current_amps))
###############################################################
#*** Current Measurement Demo: A3 ***
###############################################################
print('')
print('--- A3 Current Measurement Demo (+/- 1.5A max) ---')
print('Use an external 5V power supply and 5 ohm power resistor to put 1 Amp through A3.')
print('')
user_input = input('Press any key to continue.')
# See the M&A board data sheet for the equation that describes the Vout to current relationship
pin_6_voltage = daq_one.read_analog_input(6)
print('Read voltage: ' + str(pin_6_voltage))
calculated_current_amps = pin_6_voltage / (.033 * 200)
ma_current = round((calculated_current_amps / .001), 3)
print('Calculated mA current: ' + str(ma_current))
###############################################################
#*** Demo Complete. ***
###############################################################
# Close connection to daq
daq_one.close_daq_device()
| de | 0.403097 | ############################################################## #*** MagicDAQ USB DAQ and M&A Board General Demo Script *** ############################################################## #*** Websites *** # MagicDAQ Website: # https://www.magicdaq.com/ # API Docs Website: # https://magicdaq.github.io/magicdaq_docs/ #*** Install MagicDAQ *** # Download the MagicDAQ python package from pypi # Run this command in a command prompt: # python -m pip install magicdaq # Further docs: https://magicdaq.github.io/magicdaq_docs/#/Install_MagicDAQ # MagicDAQ is only compatible with Python 3 on Windows. It does not work on Linux at the moment. It does not work with Python 2. #*** Using Auto Code Complete With PyCharm *** # Using a code editor like Pycharm and want to get auto complete working for the MagicDAQ package? # Docs: https://magicdaq.github.io/magicdaq_docs/#/PyCharmCodeCompletion ############################################################## #*** Imports *** ############################################################## # Import MagicDAQ # Import MagicDAQDevice object # Create daq_one object # Get MagicDAQ Driver Version #/Install_MagicDAQ') #/Install_MagicDAQ') ############################################################## #*** MagicDAQ USB DAQ MDAQ300 Features Demo *** ############################################################## # This portion of the script shows off some of the USB DAQ's features # Hardware docs: https://www.magicdaq.com/product/magic-daq/ #*** Open DAQ Device *** # Remember, the daq_one object has already been created in the above 'Imports' section # We must open the daq device before performing any hardware feature manipulation # https://magicdaq.github.io/magicdaq_docs/#/MagicDAQ_Basics ############################################################### #*** Analog Output Demo: Constant, Sine, and PWM on AO1 Pin *** ############################################################### # Set constant 3 volt output voltage on AO1 pin # Configure and start 300Hz sine wave with 2V amplitude on AO1 pin # Stop previous wave # Configure and start PWM wave, 200 Hz, 50% duty cycle, 3.3V amplitude # Stop the wave ############################################################### #*** Pulse Counter Pin Demo: PWM waves *** ############################################################### # Configure a 50 KHz frequency, 75% duty cycle, continuous PWM Wave on the counter pin (CTR0) # Note that unlike the analog output pins, the CTR0 pin always outputs at an amplitude of 3.3v when producing PWM waves # Start counter wave # Now stopping the counter PWM wave ############################################################### #*** Pulse Counter Pin Demo: Pulse Counting *** ############################################################### # Start the Pulse Counter # Pulses will be counted on the falling edge # Sleep for 8 sec # Read number of pulses ############################################################### #*** Digital Pin Demo *** ############################################################### # Set P0.0 pin LOW # Set P0.0 pin HIGH ############################################################### #*** Analog Input Pin Demo *** ############################################################### # Single ended voltage measurement # If you want to perform a differential input measurement # daq_one.read_diff_analog_input() # https://magicdaq.github.io/magicdaq_docs/#/read_diff_analog_input ############################################################### #*** M&A Board Demo *** 
############################################################### # M&A Board hardware spec: # https://www.magicdaq.com/product/ma-board-full-kit/ ############################################################### #*** Relay Demo *** ############################################################### #: ' + str(relay_count) + ' Digital Pin #: ' + str(digital_pin_count)) # Set relay to open # Increment counters ############################################################### #*** Vout Demo *** ############################################################### # See the M&A board data sheet for the equation that describes the Vout to Vout_set (0 and 2.77 here) relationship ############################################################### #*** Low Current Measurement Demo: A1 *** ############################################################### # See the M&A board data sheet for the equation that describes the Vout to current relationship ############################################################### #*** Current Measurement Demo: A2 *** ############################################################### # See the M&A board data sheet for the equation that describes the Vout to current relationship # ma_current = round((calculated_current_amps / .001), 3) ############################################################### #*** Current Measurement Demo: A3 *** ############################################################### # See the M&A board data sheet for the equation that describes the Vout to current relationship ############################################################### #*** Demo Complete. *** ############################################################### # Close connection to daq | 2.34102 | 2 |
src/onenutil/schemas/__init__.py | LemurPwned/onenote-utils | 0 | 10440 | from .results import (ArticleSearchResult, EmbeddingsResult, SearchResult,
TagResult, ZoteroExtractionResult)
__all__ = [
"TagResult", "EmbeddingsResult", "ZoteroExtractionResult", "SearchResult",
"ArticleSearchResult"
]
| from .results import (ArticleSearchResult, EmbeddingsResult, SearchResult,
TagResult, ZoteroExtractionResult)
__all__ = [
"TagResult", "EmbeddingsResult", "ZoteroExtractionResult", "SearchResult",
"ArticleSearchResult"
]
| none | 1 | 1.114453 | 1 |
|
src/account/api/serializers.py | amirpsd/drf_blog_api | 33 | 10441 | <filename>src/account/api/serializers.py
from django.contrib.auth import get_user_model
from rest_framework import serializers
class UsersListSerializer(serializers.ModelSerializer):
class Meta:
model = get_user_model()
fields = [
"id", "phone",
"first_name", "last_name",
"author",
]
class UserDetailUpdateDeleteSerializer(serializers.ModelSerializer):
class Meta:
model = get_user_model()
exclude = [
"password",
]
class UserProfileSerializer(serializers.ModelSerializer):
phone = serializers.ReadOnlyField()
class Meta:
model = get_user_model()
fields = [
"id", "phone",
"first_name", "last_name",
"two_step_password",
]
class AuthenticationSerializer(serializers.Serializer):
phone = serializers.CharField(
max_length=12,
min_length=12,
)
def validate_phone(self, value):
from re import match
if not match("^989\d{2}\s*?\d{3}\s*?\d{4}$", value):
raise serializers.ValidationError("Invalid phone number.")
return value
class OtpSerializer(serializers.Serializer):
code = serializers.CharField(
max_length=6,
min_length=6,
)
password = serializers.CharField(
max_length=20,
required=False,
)
def validate_code(self, value):
try:
int(value)
except ValueError as _:
raise serializers.ValidationError("Invalid Code.")
return value
class GetTwoStepPasswordSerializer(serializers.Serializer):
"""
Base serializer two-step-password.
"""
password = serializers.CharField(
max_length=20,
)
confirm_password = serializers.CharField(
max_length=20,
)
def validate(self, data):
password = data.get('password')
confirm_password = data.get('confirm_password')
if password != confirm_password:
raise serializers.ValidationError(
{"Error": "Your passwords didn't match."}
)
return data
class ChangeTwoStepPasswordSerializer(GetTwoStepPasswordSerializer):
old_password = serializers.CharField(
max_length=20,
)
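# Illustrative sketch (not part of the original module), assuming standard
# Django REST Framework usage: mismatched password fields fail validation in
# GetTwoStepPasswordSerializer.
# s = GetTwoStepPasswordSerializer(data={'password': 'first-secret', 'confirm_password': 'second-secret'})
# s.is_valid() # False -> {"Error": "Your passwords didn't match."}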
| <filename>src/account/api/serializers.py
from django.contrib.auth import get_user_model
from rest_framework import serializers
class UsersListSerializer(serializers.ModelSerializer):
class Meta:
model = get_user_model()
fields = [
"id", "phone",
"first_name", "last_name",
"author",
]
class UserDetailUpdateDeleteSerializer(serializers.ModelSerializer):
class Meta:
model = get_user_model()
exclude = [
"password",
]
class UserProfileSerializer(serializers.ModelSerializer):
phone = serializers.ReadOnlyField()
class Meta:
model = get_user_model()
fields = [
"id", "phone",
"first_name", "last_name",
"two_step_password",
]
class AuthenticationSerializer(serializers.Serializer):
phone = serializers.CharField(
max_length=12,
min_length=12,
)
def validate_phone(self, value):
from re import match
if not match("^989\d{2}\s*?\d{3}\s*?\d{4}$", value):
raise serializers.ValidationError("Invalid phone number.")
return value
class OtpSerializer(serializers.Serializer):
code = serializers.CharField(
max_length=6,
min_length=6,
)
password = serializers.CharField(
max_length=20,
required=False,
)
def validate_code(self, value):
try:
int(value)
except ValueError as _:
raise serializers.ValidationError("Invalid Code.")
return value
class GetTwoStepPasswordSerializer(serializers.Serializer):
"""
Base serializer two-step-password.
"""
password = serializers.CharField(
max_length=20,
)
confirm_password = serializers.CharField(
max_length=20,
)
def validate(self, data):
password = data.get('password')
confirm_password = data.get('confirm_password')
if password != confirm_password:
raise serializers.ValidationError(
{"Error": "Your passwords didn't match."}
)
return data
class ChangeTwoStepPasswordSerializer(GetTwoStepPasswordSerializer):
old_password = serializers.CharField(
max_length=20,
)
| en | 0.776362 | Base serializer two-step-password. | 2.349587 | 2 |
generate_figure9.py | IBM/Simultaneous-diagonalization | 0 | 10442 | # Copyright 2022 IBM Inc. All rights reserved
# SPDX-License-Identifier: Apache2.0
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is part of the code to reproduce the results in the paper:
# <NAME> and <NAME>, "Circuit optimization of Hamiltonian
# simulation by simultaneous diagonalization of Pauli clusters," Quantum 4,
# p. 322, 2020. https://doi.org/10.22331/q-2020-09-12-322
import os
import cl
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from matplotlib.ticker import FuncFormatter
from itertools import permutations
def plotZ(Z, exportFilename=None) :
(m,n) = Z.shape
cmap = colors.LinearSegmentedColormap.from_list("white_and_gray", [(1, 1, 1), (0.6, 0.6, 0.6)], N=2)
fig, ax = plt.subplots()
im = ax.imshow(Z.T,cmap=cmap)
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.set_yticks([])
ax.set_xticks([])
for i in range(1,m) :
plt.plot([-0.5+i,-0.5+i],[-0.5,-0.5+n],color='k',linewidth=0.7)
for i in range(1,T.n) :
plt.plot([-0.5,-0.5+m],[-0.5+i,-0.5+i],color='k',linewidth=0.7)
for i in range(n) :
v = Z[:,i]
c = np.sum(v[:-1] != v[1:]) + v[0] + v[-1]
ax.text(m-0.25,i, str(c), fontsize=12, ha='left', va='center')
if (exportFilename) :
plt.gcf().tight_layout()
plt.savefig(exportFilename + "-uncropped.pdf", transparent=True)
plt.close()
os.system("pdfcrop %s-uncropped.pdf %s.pdf" % (exportFilename, exportFilename))
else :
plt.show()
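# Worked example (added for illustration, not part of the original script):
# the per-column count c above tallies 0/1 transitions down a column, treating
# the top and bottom boundaries as transitions. For a column v = [1, 1, 0, 1]:
#   v[:-1] != v[1:] -> [False, True, True] (2 interior transitions)
#   c = 2 + v[0] + v[-1] = 2 + 1 + 1 = 4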
# Make sure the figure directory exists
cl.ensureDirExists('fig')
# Create the test problem
M = cl.create_basic_problem(7,0)
C = cl.generate_full_rank_weights(20,7,seed=1)
M = np.dot(C,M) % 2
# Apply diagonalization and get the final Z matrix
T = cl.Tableau(M)
R = cl.RecordOperations(T.n)
T.addRecorder(R)
cl.zeroX_algorithm1_cz(T)
T = cl.Tableau(M)
R.apply(T)
Z = T.getZ()
# Plot the results
plotZ(Z,'fig/Figure_9a')
print("Original: %d" % cl.countCNot(Z))
idx = cl.orderZ(Z)
plotZ(Z[idx,:],'fig/Figure_9b')
print("Sorted : %d" % cl.countCNot(Z[idx,:]))
# Generate histogram of actual permutations
if (True) :
base = list(range(7))
count = []
for idx2 in permutations(base) :
idx1 = cl.orderZ(Z[:,idx2])
count.append(cl.countCNot(Z[idx1,:][:,idx2]))
def format_percentage(y, position):
return str(100 * y)
# Count is always even
plt.hist(count,bins=list(range(min(count)-1,max(count)+2,2)),rwidth=0.9,density=True)
plt.gca().set_xticklabels([str(x) for x in range(min(count),max(count)+1,2)],fontsize=16)
plt.gca().set_xticks(list(range(min(count),max(count)+1,2)))
plt.gca().yaxis.set_major_formatter(FuncFormatter(format_percentage))
plt.xlabel('Number of CNOT gates',fontsize=16)
plt.ylabel("Percentage",fontsize=16)
for tick in plt.gca().yaxis.get_major_ticks():
tick.label.set_fontsize(16)
plt.gcf().tight_layout()
ratio = 0.5
xleft, xright = plt.gca().get_xlim()
ybottom, ytop = plt.gca().get_ylim()
plt.gca().set_aspect(abs((xright-xleft)/(ybottom-ytop))*ratio)
plt.savefig("fig/Figure_9c-uncropped.pdf", transparent=True)
plt.close()
os.system("pdfcrop fig/Figure_9c-uncropped.pdf fig/Figure_9c.pdf")
| # Copyright 2022 IBM Inc. All rights reserved
# SPDX-License-Identifier: Apache2.0
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is part of the code to reproduce the results in the paper:
# <NAME> and <NAME>, "Circuit optimization of Hamiltonian
# simulation by simultaneous diagonalization of Pauli clusters," Quantum 4,
# p. 322, 2020. https://doi.org/10.22331/q-2020-09-12-322
import os
import cl
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from matplotlib.ticker import FuncFormatter
from itertools import permutations
def plotZ(Z, exportFilename=None) :
(m,n) = Z.shape
cmap = colors.LinearSegmentedColormap.from_list("white_and_gray", [(1, 1, 1), (0.6, 0.6, 0.6)], N=2)
fig, ax = plt.subplots()
im = ax.imshow(Z.T,cmap=cmap)
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.set_yticks([])
ax.set_xticks([])
for i in range(1,m) :
plt.plot([-0.5+i,-0.5+i],[-0.5,-0.5+n],color='k',linewidth=0.7)
for i in range(1,T.n) :
plt.plot([-0.5,-0.5+m],[-0.5+i,-0.5+i],color='k',linewidth=0.7)
for i in range(n) :
v = Z[:,i]
c = np.sum(v[:-1] != v[1:]) + v[0] + v[-1]
ax.text(m-0.25,i, str(c), fontsize=12, ha='left', va='center')
if (exportFilename) :
plt.gcf().tight_layout()
plt.savefig(exportFilename + "-uncropped.pdf", transparent=True)
plt.close()
os.system("pdfcrop %s-uncropped.pdf %s.pdf" % (exportFilename, exportFilename))
else :
plt.show()
# Make sure the figure directory exists
cl.ensureDirExists('fig')
# Create the test problem
M = cl.create_basic_problem(7,0)
C = cl.generate_full_rank_weights(20,7,seed=1)
M = np.dot(C,M) % 2
# Apply diagonalization and get the final Z matrix
T = cl.Tableau(M)
R = cl.RecordOperations(T.n)
T.addRecorder(R)
cl.zeroX_algorithm1_cz(T)
T = cl.Tableau(M)
R.apply(T)
Z = T.getZ()
# Plot the results
plotZ(Z,'fig/Figure_9a')
print("Original: %d" % cl.countCNot(Z))
idx = cl.orderZ(Z)
plotZ(Z[idx,:],'fig/Figure_9b')
print("Sorted : %d" % cl.countCNot(Z[idx,:]))
# Generate histogram of actual permutations
if (True) :
base = list(range(7))
count = []
for idx2 in permutations(base) :
idx1 = cl.orderZ(Z[:,idx2])
count.append(cl.countCNot(Z[idx1,:][:,idx2]))
def format_percentage(y, position):
return str(100 * y)
# Count is always even
plt.hist(count,bins=list(range(min(count)-1,max(count)+2,2)),rwidth=0.9,density=True)
plt.gca().set_xticklabels([str(x) for x in range(min(count),max(count)+1,2)],fontsize=16)
plt.gca().set_xticks(list(range(min(count),max(count)+1,2)))
plt.gca().yaxis.set_major_formatter(FuncFormatter(format_percentage))
plt.xlabel('Number of CNOT gates',fontsize=16)
plt.ylabel("Percentage",fontsize=16)
for tick in plt.gca().yaxis.get_major_ticks():
tick.label.set_fontsize(16)
plt.gcf().tight_layout()
ratio = 0.5
xleft, xright = plt.gca().get_xlim()
ybottom, ytop = plt.gca().get_ylim()
plt.gca().set_aspect(abs((xright-xleft)/(ybottom-ytop))*ratio)
plt.savefig("fig/Figure_9c-uncropped.pdf", transparent=True)
plt.close()
os.system("pdfcrop fig/Figure_9c-uncropped.pdf fig/Figure_9c.pdf")
| en | 0.8377 | # Copyright 2022 IBM Inc. All rights reserved # SPDX-License-Identifier: Apache2.0 # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This file is part of the code to reproduce the results in the paper: # <NAME> and <NAME>, "Circuit optimization of Hamiltonian # simulation by simultaneous diagonalization of Pauli clusters," Quantum 4, # p. 322, 2020. https://doi.org/10.22331/q-2020-09-12-322 # Make sure the figure directory exists # Create the test problem # Apply diagonalization and get the final Z matrix # Plot the results # Generate histogram of actual permutations # Count is always even | 2.049438 | 2 |
undeployed/legacy/Landsat/L7GapFiller_ArcInterface.py | NASA-DEVELOP/dnppy | 65 | 10443 | <filename>undeployed/legacy/Landsat/L7GapFiller_ArcInterface.py
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: qgeddes
#
# Created: 25/04/2013
# Copyright: (c) qgeddes 2013
# Licence: <your licence>
#-------------------------------------------------------------------------------
import arcpy
import L7GapFiller
Scenes=arcpy.GetParameterAsText(0)
Scenes=Scenes.split(";")
OutputFolder=arcpy.GetParameterAsText(1)
OutputFile= arcpy.GetParameterAsText(2)
Output=OutputFolder+"\\"+OutputFile
CloudMasks= arcpy.GetParameterAsText(3)
CloudMasks= CloudMasks.split(";")
Z=arcpy.GetParameter(4)
arcpy.AddMessage(Z)
arcpy.env.scratchWorkspace=OutputFolder
arcpy.CheckOutExtension("Spatial")
arcpy.env.overwriteOutput=True
L7GapFiller.L7GapFill(Scenes, Output,CloudMasks,Z)
| <filename>undeployed/legacy/Landsat/L7GapFiller_ArcInterface.py
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: qgeddes
#
# Created: 25/04/2013
# Copyright: (c) qgeddes 2013
# Licence: <your licence>
#-------------------------------------------------------------------------------
import arcpy
import L7GapFiller
Scenes=arcpy.GetParameterAsText(0)
Scenes=Scenes.split(";")
OutputFolder=arcpy.GetParameterAsText(1)
OutputFile= arcpy.GetParameterAsText(2)
Output=OutputFolder+"\\"+OutputFile
CloudMasks= arcpy.GetParameterAsText(3)
CloudMasks= CloudMasks.split(";")
Z=arcpy.GetParameter(4)
arcpy.AddMessage(Z)
arcpy.env.scratchWorkspace=OutputFolder
arcpy.CheckOutExtension("Spatial")
arcpy.env.overwriteOutput=True
L7GapFiller.L7GapFill(Scenes, Output,CloudMasks,Z)
| en | 0.197105 | #------------------------------------------------------------------------------- # Name: module1 # Purpose: # # Author: qgeddes # # Created: 25/04/2013 # Copyright: (c) qgeddes 2013 # Licence: <your licence> #------------------------------------------------------------------------------- | 1.602437 | 2 |
tests/sentry/api/serializers/test_saved_search.py | practo/sentry | 4 | 10444 | <filename>tests/sentry/api/serializers/test_saved_search.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import six
from sentry.api.serializers import serialize
from sentry.models import SavedSearch
from sentry.models.savedsearch import DEFAULT_SAVED_SEARCHES
from sentry.testutils import TestCase
class SavedSearchSerializerTest(TestCase):
def test_simple(self):
search = SavedSearch.objects.create(
project=self.project,
name='Something',
query='some query'
)
result = serialize(search)
assert result['id'] == six.text_type(search.id)
assert result['projectId'] == six.text_type(search.project_id)
assert result['name'] == search.name
assert result['query'] == search.query
assert result['isDefault'] == search.is_default
assert result['isUserDefault'] == search.is_default
assert result['dateCreated'] == search.date_added
assert not result['isPrivate']
assert not result['isGlobal']
def test_global(self):
default_saved_search = DEFAULT_SAVED_SEARCHES[0]
search = SavedSearch(
name=default_saved_search['name'],
query=default_saved_search['query'],
is_global=True,
)
result = serialize(search)
assert result['id'] == six.text_type(search.id)
assert result['projectId'] is None
assert result['name'] == search.name
assert result['query'] == search.query
assert not result['isDefault']
assert not result['isUserDefault']
assert result['dateCreated'] == search.date_added
assert not result['isPrivate']
assert result['isGlobal']
| <filename>tests/sentry/api/serializers/test_saved_search.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import six
from sentry.api.serializers import serialize
from sentry.models import SavedSearch
from sentry.models.savedsearch import DEFAULT_SAVED_SEARCHES
from sentry.testutils import TestCase
class SavedSearchSerializerTest(TestCase):
def test_simple(self):
search = SavedSearch.objects.create(
project=self.project,
name='Something',
query='some query'
)
result = serialize(search)
assert result['id'] == six.text_type(search.id)
assert result['projectId'] == six.text_type(search.project_id)
assert result['name'] == search.name
assert result['query'] == search.query
assert result['isDefault'] == search.is_default
assert result['isUserDefault'] == search.is_default
assert result['dateCreated'] == search.date_added
assert not result['isPrivate']
assert not result['isGlobal']
def test_global(self):
default_saved_search = DEFAULT_SAVED_SEARCHES[0]
search = SavedSearch(
name=default_saved_search['name'],
query=default_saved_search['query'],
is_global=True,
)
result = serialize(search)
assert result['id'] == six.text_type(search.id)
assert result['projectId'] is None
assert result['name'] == search.name
assert result['query'] == search.query
assert not result['isDefault']
assert not result['isUserDefault']
assert result['dateCreated'] == search.date_added
assert not result['isPrivate']
assert result['isGlobal']
| en | 0.769321 | # -*- coding: utf-8 -*- | 2.28036 | 2 |
xastropy/files/general.py | bpholden/xastropy | 3 | 10445 | """
#;+
#; NAME:
#; general
#; Version 1.0
#;
#; PURPOSE:
#; Module for monkeying with files and filenames
#; 172Sep-2014 by JXP
#;-
#;------------------------------------------------------------------------------
"""
# Import libraries
import numpy as np
from astropy.io import fits
from astropy.io import ascii
import os, pdb
#### ###############################
# Deal with .gz extensions, usually on FITS files
# See if filenm exists, if so pass it back
#
def chk_for_gz(filenm,chk=None):
import os, pdb
# File exist?
if os.path.lexists(filenm):
chk=1
return filenm, chk
# .gz already
if filenm.find('.gz') > 0:
chk=0
return filenm, chk
# Add .gz
if os.path.lexists(filenm+'.gz'):
chk=1
return filenm+'.gz', chk
else:
chk=0
return filenm, chk
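# Example usage (added for illustration; the filename below is hypothetical):
# fname, chk = chk_for_gz('spec1d_J1234+5678.fits')
# if chk:
#     hdulist = fits.open(fname) # works whether the file is .fits or .fits.gz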
| """
#;+
#; NAME:
#; general
#; Version 1.0
#;
#; PURPOSE:
#; Module for monkeying with files and filenames
#; 172Sep-2014 by JXP
#;-
#;------------------------------------------------------------------------------
"""
# Import libraries
import numpy as np
from astropy.io import fits
from astropy.io import ascii
import os, pdb
#### ###############################
# Deal with .gz extensions, usually on FITS files
# See if filenm exists, if so pass it back
#
def chk_for_gz(filenm,chk=None):
import os, pdb
# File exist?
if os.path.lexists(filenm):
chk=1
return filenm, chk
# .gz already
if filenm.find('.gz') > 0:
chk=0
return filenm, chk
# Add .gz
if os.path.lexists(filenm+'.gz'):
chk=1
return filenm+'.gz', chk
else:
chk=0
return filenm, chk
| en | 0.43099 | #;+ #; NAME: #; general #; Version 1.0 #; #; PURPOSE: #; Module for monkeying with files and filenames #; 172Sep-2014 by JXP #;- #;------------------------------------------------------------------------------ # Import libraries #### ############################### # Deal with .gz extensions, usually on FITS files # See if filenm exists, if so pass it back # # File exist? # .gz already # Add .gz | 2.575205 | 3 |
setup.py | muatik/genderizer | 54 | 10446 | #!/usr/bin/env python
try:
from setuptools.core import setup
except ImportError:
from distutils.core import setup
setup(name='genderizer',
version='0.1.2.3',
license='MIT',
description='Genderizer tries to infer gender information looking at first name and/or making text analysis',
long_description=open('README.md').read(),
url='https://github.com/muatik/genderizer',
author='<NAME>',
author_email='<EMAIL>',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
packages=['genderizer'],
package_data={'genderizer': ['data/*']},
platforms='any') | #!/usr/bin/env python
try:
from setuptools.core import setup
except ImportError:
from distutils.core import setup
setup(name='genderizer',
version='0.1.2.3',
license='MIT',
description='Genderizer tries to infer gender information looking at first name and/or making text analysis',
long_description=open('README.md').read(),
url='https://github.com/muatik/genderizer',
author='<NAME>',
author_email='<EMAIL>',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
packages=['genderizer'],
package_data={'genderizer': ['data/*']},
platforms='any') | ru | 0.26433 | #!/usr/bin/env python | 1.385871 | 1 |
ingestion/tests/unit/great_expectations/test_ometa_validation_action.py | ulixius9/OpenMetadata | 0 | 10447 | # Copyright 2022 Collate
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test suite for the action module implementation
"""
import os
from unittest import mock
from jinja2 import Environment
from pytest import mark
from metadata.great_expectations.action import OpenMetadataValidationAction
from metadata.great_expectations.utils.ometa_config_handler import render_template
@mark.parametrize(
"input,expected",
[
(None, "list_entities"),
("service_name", "get_by_name"),
],
)
def test_get_table_entity(input, expected, mocked_ometa, mocked_ge_data_context):
"""Test get table entity"""
ometa_validation = OpenMetadataValidationAction(
data_context=mocked_ge_data_context,
config_file_path="my/config/path",
ometa_service_name=input,
)
res = ometa_validation._get_table_entity("database", "schema", "table")
assert res._type == expected
def test_create_jinja_environment(fixture_jinja_environment):
"""Test create jinja environment"""
assert isinstance(fixture_jinja_environment, Environment)
@mock.patch.dict(os.environ, {"API_VERSION": "v1"})
def test_render_template(fixture_jinja_environment):
"""Test create jinja environment"""
tmplt = render_template(fixture_jinja_environment)
assert tmplt == "hostPort: http://localhost:8585\napiVersion: v1"
| # Copyright 2022 Collate
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test suite for the action module implementation
"""
import os
from unittest import mock
from jinja2 import Environment
from pytest import mark
from metadata.great_expectations.action import OpenMetadataValidationAction
from metadata.great_expectations.utils.ometa_config_handler import render_template
@mark.parametrize(
"input,expected",
[
(None, "list_entities"),
("service_name", "get_by_name"),
],
)
def test_get_table_entity(input, expected, mocked_ometa, mocked_ge_data_context):
"""Test get table entity"""
ometa_validation = OpenMetadataValidationAction(
data_context=mocked_ge_data_context,
config_file_path="my/config/path",
ometa_service_name=input,
)
res = ometa_validation._get_table_entity("database", "schema", "table")
assert res._type == expected
def test_create_jinja_environment(fixture_jinja_environment):
"""Test create jinja environment"""
assert isinstance(fixture_jinja_environment, Environment)
@mock.patch.dict(os.environ, {"API_VERSION": "v1"})
def test_render_template(fixture_jinja_environment):
"""Test create jinja environment"""
tmplt = render_template(fixture_jinja_environment)
assert tmplt == "hostPort: http://localhost:8585\napiVersion: v1"
| en | 0.785626 | # Copyright 2022 Collate # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Test suite for the action module implementation Test get table entity Test create jinja environment Test create jinja environment | 1.950374 | 2 |
tests/integration/Containers.py | adnrs96/runtime | 0 | 10448 | # -*- coding: utf-8 -*-
from storyruntime.Containers import Containers
from storyruntime.constants.ServiceConstants import ServiceConstants
import storyscript
def test_containers_format_command(story):
"""
Ensures a simple resolve can be performed
"""
story_text = 'alpine echo msg:"foo"\n'
story.context = {}
story.app.services = {
'alpine': {
ServiceConstants.config: {
'actions': {
'echo': {
'arguments': {'msg': {'type': 'string'}}
}
}
}
}
}
story.tree = storyscript.Api.loads(story_text).result()['tree']
assert Containers.format_command(
story, story.line('1'), 'alpine', 'echo'
) == ['echo', '{"msg":"foo"}']
def test_containers_format_command_no_arguments(story):
story_text = 'alpine echo\n'
story.context = {}
story.app.services = {
'alpine': {
ServiceConstants.config: {
'actions': {
'echo': {}
}
}
}
}
story.tree = storyscript.Api.loads(story_text).result()['tree']
assert Containers.format_command(
story, story.line('1'), 'alpine', 'echo'
) == ['echo']
| # -*- coding: utf-8 -*-
from storyruntime.Containers import Containers
from storyruntime.constants.ServiceConstants import ServiceConstants
import storyscript
def test_containers_format_command(story):
"""
Ensures a simple resolve can be performed
"""
story_text = 'alpine echo msg:"foo"\n'
story.context = {}
story.app.services = {
'alpine': {
ServiceConstants.config: {
'actions': {
'echo': {
'arguments': {'msg': {'type': 'string'}}
}
}
}
}
}
story.tree = storyscript.Api.loads(story_text).result()['tree']
assert Containers.format_command(
story, story.line('1'), 'alpine', 'echo'
) == ['echo', '{"msg":"foo"}']
def test_containers_format_command_no_arguments(story):
story_text = 'alpine echo\n'
story.context = {}
story.app.services = {
'alpine': {
ServiceConstants.config: {
'actions': {
'echo': {}
}
}
}
}
story.tree = storyscript.Api.loads(story_text).result()['tree']
assert Containers.format_command(
story, story.line('1'), 'alpine', 'echo'
) == ['echo']
| en | 0.875671 | # -*- coding: utf-8 -*- Ensures a simple resolve can be performed | 2.585179 | 3 |
project_name/core/admin.py | cosmunsoftwares/django-boilerplate | 3 | 10449 | from django.contrib import admin
from django.shortcuts import redirect
from django.utils.safestring import mark_safe
from django.contrib.admin.widgets import AdminFileWidget
class AdminImageWidget(AdminFileWidget):
def render(self, name, value, attrs=None, renderer=None):
output = []
if value and getattr(value, "url", None):
output.append(u'<a href="%s" target="_blank">%s</a>' % (value.url, thumbnail(value)))
output.append(super(AdminFileWidget, self).render(name, value, attrs, renderer))
return mark_safe(u''.join(output))
class ImageWidgetAdmin(admin.ModelAdmin):
image_fields = []
def formfield_for_dbfield(self, db_field, **kwargs):
if db_field.name in self.image_fields:
kwargs.pop("request", None)
kwargs['widget'] = AdminImageWidget
return db_field.formfield(**kwargs)
return super(ImageWidgetAdmin, self).formfield_for_dbfield(db_field, **kwargs)
def redirect_one_object(model, obj):
response = redirect(f'/admin/{model._meta.app_label}/{model._meta.model_name}/add/')
if obj:
response = redirect(f'/admin/{model._meta.app_label}/{model._meta.model_name}/{obj.pk}/change/')
return response
def thumbnail(obj, size='col-md-2'):
return mark_safe('<img src="{}" class="img-thumbnail {} p-0">'.format(obj.url, size))
| from django.contrib import admin
from django.shortcuts import redirect
from django.utils.safestring import mark_safe
from django.contrib.admin.widgets import AdminFileWidget
class AdminImageWidget(AdminFileWidget):
def render(self, name, value, attrs=None, renderer=None):
output = []
if value and getattr(value, "url", None):
output.append(u'<a href="%s" target="_blank">%s</a>' % (value.url, thumbnail(value)))
output.append(super(AdminFileWidget, self).render(name, value, attrs, renderer))
return mark_safe(u''.join(output))
class ImageWidgetAdmin(admin.ModelAdmin):
image_fields = []
def formfield_for_dbfield(self, db_field, **kwargs):
if db_field.name in self.image_fields:
kwargs.pop("request", None)
kwargs['widget'] = AdminImageWidget
return db_field.formfield(**kwargs)
return super(ImageWidgetAdmin, self).formfield_for_dbfield(db_field, **kwargs)
def redirect_one_object(model, obj):
response = redirect(f'/admin/{model._meta.app_label}/{model._meta.model_name}/add/')
if obj:
response = redirect(f'/admin/{model._meta.app_label}/{model._meta.model_name}/{obj.pk}/change/')
return response
def thumbnail(obj, size='col-md-2'):
return mark_safe('<img src="{}" class="img-thumbnail {} p-0">'.format(obj.url, size))
| none | 1 | 2.022962 | 2 |
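# A minimal usage sketch for the helpers above. "Profile" and its "avatar" ImageField are
# hypothetical; only ImageWidgetAdmin and thumbnail come from this module.
from .models import Profile  # hypothetical model with an ImageField named "avatar"

@admin.register(Profile)
class ProfileAdmin(ImageWidgetAdmin):
    image_fields = ['avatar']        # these fields get the clickable AdminImageWidget preview
    list_display = ('id', 'preview')

    def preview(self, obj):
        # reuse thumbnail() for a small inline preview in the changelist
        return thumbnail(obj.avatar) if obj.avatar else ''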
|
src/5vents.py | subhash686/aoc-2021 | 0 | 10450 | <reponame>subhash686/aoc-2021
import os
plane = [[0 for i in range(1000)] for j in range(1000)]
count = [0]
def overlapping_vents():
path = os.getcwd()
file_path = os.path.join(path, 'vents.txt')
file1 = open(file_path, 'r')
Lines = file1.readlines()
for line in Lines:
input = line.strip()
points = input.split(" -> ")
plot(points[0], points[1])
print(count[0])
def plot(point1, point2):
p1 = point1.split(",")
p2 = point2.split(",")
x1 = int(p1[0])
x2 = int(p2[0])
y1 = int(p1[1])
y2 = int(p2[1])
if x1 == x2 and y1 == y2:
addpoints(x1, y1)
elif x1 == x2:
if y1 > y2:
y1, y2 = y2, y1
for y in range(y1, y2+1):
addpoints(x1, y)
elif y1 == y2:
if x1 > x2:
x1, x2 = x2, x1
for x in range(x1, x2+1):
addpoints(x, y1)
else:
slope = (y2-y1)/ (x2-x1)
intercept = y1 - (x1 * slope)
if x1 > x2:
x1, x2 = x2, x1
for x in range(x1, x2+1):
addpoints(x, int(x*slope)+int(intercept))
def addpoints(x, y):
if plane[x][y] == 1:
count[0] +=1
plane[x][y] += 1
if __name__ == "__main__":
overlapping_vents()
| import os
plane = [[0 for i in range(1000)] for j in range(1000)]
count = [0]
def overlapping_vents():
path = os.getcwd()
file_path = os.path.join(path, 'vents.txt')
file1 = open(file_path, 'r')
Lines = file1.readlines()
for line in Lines:
input = line.strip()
points = input.split(" -> ")
plot(points[0], points[1])
print(count[0])
def plot(point1, point2):
p1 = point1.split(",")
p2 = point2.split(",")
x1 = int(p1[0])
x2 = int(p2[0])
y1 = int(p1[1])
y2 = int(p2[1])
if x1 == x2 and y1 == y2:
addpoints(x1, y1)
elif x1 == x2:
if y1 > y2:
y1, y2 = y2, y1
for y in range(y1, y2+1):
addpoints(x1, y)
elif y1 == y2:
if x1 > x2:
x1, x2 = x2, x1
for x in range(x1, x2+1):
addpoints(x, y1)
else:
slope = (y2-y1)/ (x2-x1)
intercept = y1 - (x1 * slope)
if x1 > x2:
x1, x2 = x2, x1
for x in range(x1, x2+1):
addpoints(x, int(x*slope)+int(intercept))
def addpoints(x, y):
if plane[x][y] == 1:
count[0] +=1
plane[x][y] += 1
if __name__ == "__main__":
overlapping_vents() | none | 1 | 3.292151 | 3 |
|
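# A small self-contained check of the helpers above (illustrative only; it mutates the
# module-level plane/count state, so run it on a fresh import). vents.txt is expected to
# contain one segment per line in the form "x1,y1 -> x2,y2".
plot("0,0", "0,3")   # vertical segment covering (0,0)..(0,3)
plot("0,2", "3,2")   # horizontal segment crossing it at (0,2)
print(count[0])      # -> 1 overlapping point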
problems/test_0073_m_plus_n_space.py | chrisxue815/leetcode_python | 1 | 10451 | import unittest
class Solution:
def setZeroes(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: void Do not return anything, modify matrix in-place instead.
"""
rows = [0] * len(matrix)
cols = [0] * len(matrix[0])
for i, row in enumerate(matrix):
for j, num in enumerate(row):
if not num:
rows[i] = 1
cols[j] = 1
for row, num in enumerate(rows):
if num:
for j in range(len(matrix[0])):
matrix[row][j] = 0
for col, num in enumerate(cols):
if num:
for i in range(len(matrix)):
matrix[i][col] = 0
class Test(unittest.TestCase):
def test(self):
self._test(
[
[1, 2, 0],
[1, 2, 3],
[0, 2, 3],
],
[
[0, 0, 0],
[0, 2, 0],
[0, 0, 0],
]
)
def _test(self, matrix, expected):
Solution().setZeroes(matrix)
self.assertEqual(expected, matrix)
if __name__ == '__main__':
unittest.main()
| import unittest
class Solution:
def setZeroes(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: void Do not return anything, modify matrix in-place instead.
"""
rows = [0] * len(matrix)
cols = [0] * len(matrix[0])
for i, row in enumerate(matrix):
for j, num in enumerate(row):
if not num:
rows[i] = 1
cols[j] = 1
for row, num in enumerate(rows):
if num:
for j in range(len(matrix[0])):
matrix[row][j] = 0
for col, num in enumerate(cols):
if num:
for i in range(len(matrix)):
matrix[i][col] = 0
class Test(unittest.TestCase):
def test(self):
self._test(
[
[1, 2, 0],
[1, 2, 3],
[0, 2, 3],
],
[
[0, 0, 0],
[0, 2, 0],
[0, 0, 0],
]
)
def _test(self, matrix, expected):
Solution().setZeroes(matrix)
self.assertEqual(expected, matrix)
if __name__ == '__main__':
unittest.main()
| en | 0.397521 | :type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead. | 3.480713 | 3 |
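# A quick illustrative call of the Solution above, outside the unittest harness:
m = [[1, 2, 3],
     [4, 0, 6],
     [7, 8, 9]]
Solution().setZeroes(m)
print(m)  # [[1, 0, 3], [0, 0, 0], [7, 0, 9]]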
xlsxwriter/test/worksheet/test_write_print_options.py | Aeon1/XlsxWriter | 2 | 10452 | <reponame>Aeon1/XlsxWriter<filename>xlsxwriter/test/worksheet/test_write_print_options.py
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2019, <NAME>, <EMAIL>
#
import unittest
from ...compatibility import StringIO
from ...worksheet import Worksheet
class TestWritePrintOptions(unittest.TestCase):
"""
Test the Worksheet _write_print_options() method.
"""
def setUp(self):
self.fh = StringIO()
self.worksheet = Worksheet()
self.worksheet._set_filehandle(self.fh)
def test_write_print_options_default(self):
"""Test the _write_print_options() method without options"""
self.worksheet._write_print_options()
exp = """"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_print_options_hcenter(self):
"""Test the _write_print_options() method with horizontal center"""
self.worksheet.center_horizontally()
self.worksheet._write_print_options()
exp = """<printOptions horizontalCentered="1"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_print_options_vcenter(self):
"""Test the _write_print_options() method with vertical center"""
self.worksheet.center_vertically()
self.worksheet._write_print_options()
exp = """<printOptions verticalCentered="1"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_print_options_center(self):
"""Test the _write_print_options() method with horiz + vert center"""
self.worksheet.center_horizontally()
self.worksheet.center_vertically()
self.worksheet._write_print_options()
exp = """<printOptions horizontalCentered="1" verticalCentered="1"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_print_options_gridlines_default(self):
"""Test the _write_print_options() method with default value"""
self.worksheet.hide_gridlines()
self.worksheet._write_print_options()
exp = """"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_print_options_gridlines_0(self):
"""Test the _write_print_options() method with 0 value"""
self.worksheet.hide_gridlines(0)
self.worksheet._write_print_options()
exp = """<printOptions gridLines="1"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
| ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2019, <NAME>, <EMAIL>
#
import unittest
from ...compatibility import StringIO
from ...worksheet import Worksheet
class TestWritePrintOptions(unittest.TestCase):
"""
Test the Worksheet _write_print_options() method.
"""
def setUp(self):
self.fh = StringIO()
self.worksheet = Worksheet()
self.worksheet._set_filehandle(self.fh)
def test_write_print_options_default(self):
"""Test the _write_print_options() method without options"""
self.worksheet._write_print_options()
exp = """"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_print_options_hcenter(self):
"""Test the _write_print_options() method with horizontal center"""
self.worksheet.center_horizontally()
self.worksheet._write_print_options()
exp = """<printOptions horizontalCentered="1"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_print_options_vcenter(self):
"""Test the _write_print_options() method with vertical center"""
self.worksheet.center_vertically()
self.worksheet._write_print_options()
exp = """<printOptions verticalCentered="1"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_print_options_center(self):
"""Test the _write_print_options() method with horiz + vert center"""
self.worksheet.center_horizontally()
self.worksheet.center_vertically()
self.worksheet._write_print_options()
exp = """<printOptions horizontalCentered="1" verticalCentered="1"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_print_options_gridlines_default(self):
"""Test the _write_print_options() method with default value"""
self.worksheet.hide_gridlines()
self.worksheet._write_print_options()
exp = """"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_print_options_gridlines_0(self):
"""Test the _write_print_options() method with 0 value"""
self.worksheet.hide_gridlines(0)
self.worksheet._write_print_options()
exp = """<printOptions gridLines="1"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp) | en | 0.480874 | ############################################################################### # # Tests for XlsxWriter. # # Copyright (c), 2013-2019, <NAME>, <EMAIL> # Test the Worksheet _write_print_options() method. Test the _write_print_options() method without options Test the _write_print_options() method with horizontal center <printOptions horizontalCentered="1"/> Test the _write_print_options() method with vertical center <printOptions verticalCentered="1"/> Test the _write_print_options() method with horiz + vert center <printOptions horizontalCentered="1" verticalCentered="1"/> Test the _write_print_options() method with default value Test the _write_print_options() method with 0 value <printOptions gridLines="1"/> | 2.834799 | 3 |
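# The tests above poke Worksheet internals directly; through the public XlsxWriter API the
# same print options are set like this (the output filename is arbitrary):
import xlsxwriter

workbook = xlsxwriter.Workbook('print_options_demo.xlsx')
worksheet = workbook.add_worksheet()
worksheet.center_horizontally()   # -> horizontalCentered="1"
worksheet.center_vertically()     # -> verticalCentered="1"
worksheet.hide_gridlines(0)       # print gridlines -> gridLines="1"
workbook.close()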
neo4j_helper.py | smartaec/OpenBridgeGraph | 0 | 10453 | <reponame>smartaec/OpenBridgeGraph<filename>neo4j_helper.py
from neo4j.v1 import GraphDatabase #neo4j==1.7.0
uri="bolt://localhost:7687"
driver=GraphDatabase.driver(uri, auth=("neo4j", "testneo4j"))
def execute_queries(scripts,message=None):
with driver.session() as session:
tx=session.begin_transaction()
res=tx.run(';'.join(scripts))
tx.commit()
return res
def execute_query(script,message=None):
with driver.session() as session:
return session.run(script,message)
def execute_read(cypher_func,message):
with driver.session() as session:
return session.read_transaction(cypher_func,message)
def execute_write(cypher_func,message):
with driver.session() as session:
return session.write_transaction(cypher_func,message)
def run_query(tx,script):
return tx.run(script)
def print_query(tx,name):
for record in tx.run("MATCH (a:Person)-[:KNOWS]->(f) WHERE a.name = {name} RETURN f.name",name=name):
print(record["f.name"])
return ""
#execute_read(print_query,'Alice') | from neo4j.v1 import GraphDatabase #neo4j==1.7.0
uri="bolt://localhost:7687"
driver=GraphDatabase.driver(uri, auth=("neo4j", "testneo4j"))
def execute_queries(scripts,message=None):
with driver.session() as session:
tx=session.begin_transaction()
res=tx.run(';'.join(scripts))
tx.commit()
return res
def execute_query(script,message=None):
with driver.session() as session:
return session.run(script,message)
def execute_read(cypher_func,message):
with driver.session() as session:
return session.read_transaction(cypher_func,message)
def execute_write(cypher_func,message):
with driver.session() as session:
return session.write_transaction(cypher_func,message)
def run_query(tx,script):
return tx.run(script)
def print_query(tx,name):
for record in tx.run("MATCH (a:Person)-[:KNOWS]->(f) WHERE a.name = {name} RETURN f.name",name=name):
print(record["f.name"])
return ""
#execute_read(print_query,'Alice') | en | 0.251728 | #neo4j==1.7.0 #execute_read(print_query,'Alice') | 2.638175 | 3 |
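# A minimal usage sketch for the helpers above; the Person label and name value are illustrative.
execute_write(run_query, "MERGE (:Person {name: 'Alice'})")

def count_people(tx, name):
    result = tx.run("MATCH (p:Person) WHERE p.name = {name} RETURN count(p) AS c", name=name)
    return result.single()["c"]

print(execute_read(count_people, "Alice"))  # -> 1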
tests/unit/test_juju.py | KellenRenshaw/hotsos | 0 | 10454 | <filename>tests/unit/test_juju.py
import os
import tempfile
import mock
from . import utils
from hotsos.core.config import setup_config
from hotsos.core.ycheck.scenarios import YScenarioChecker
from hotsos.core.issues.utils import KnownBugsStore, IssuesStore
from hotsos.plugin_extensions.juju import summary
JOURNALCTL_CAPPEDPOSITIONLOST = """
Dec 21 14:07:53 juju-1 mongod.37017[17873]: [replication-18] CollectionCloner ns:juju.txns.log finished cloning with status: QueryPlanKilled: PlanExecutor killed: CappedPositionLost: CollectionScan died due to position in capped collection being deleted. Last seen record id: RecordId(204021366)
Dec 21 14:07:53 juju-1 mongod.37017[17873]: [replication-18] collection clone for 'juju.txns.log' failed due to QueryPlanKilled: While cloning collection 'juju.txns.log' there was an error 'PlanExecutor killed: CappedPositionLost: CollectionScan died due to position in capped collection being deleted. Last seen record id: RecordId(204021366)'
""" # noqa
RABBITMQ_CHARM_LOGS = """
2021-02-17 08:18:44 ERROR juju.worker.dependency engine.go:671 "uniter" manifold worker returned unexpected error: failed to initialize uniter for "unit-rabbitmq-server-0": cannot create relation state tracker: cannot remove persisted state, relation 236 has members
2021-02-17 08:20:34 ERROR juju.worker.dependency engine.go:671 "uniter" manifold worker returned unexpected error: failed to initialize uniter for "unit-rabbitmq-server-0": cannot create relation state tracker: cannot remove persisted state, relation 236 has members
""" # noqa
UNIT_LEADERSHIP_ERROR = """
2021-09-16 10:28:25 WARNING leader-elected ERROR cannot write leadership settings: cannot write settings: failed to merge leadership settings: application "keystone": prerequisites failed: "keystone/2" is not leader of "keystone"
2021-09-16 10:28:47 WARNING leader-elected ERROR cannot write leadership settings: cannot write settings: failed to merge leadership settings: application "keystone": prerequisites failed: "keystone/2" is not leader of "keystone"
2021-09-16 10:29:06 WARNING leader-elected ERROR cannot write leadership settings: cannot write settings: failed to merge leadership settings: application "keystone": prerequisites failed: "keystone/2" is not leader of "keystone"
2021-09-16 10:29:53 WARNING leader-elected ERROR cannot write leadership settings: cannot write settings: failed to merge leadership settings: application "keystone": prerequisites failed: "keystone/2" is not leader of "keystone"
2021-09-16 10:30:41 WARNING leader-elected ERROR cannot write leadership settings: cannot write settings: failed to merge leadership settings: application "keystone": prerequisites failed: "keystone/2" is not leader of "keystone"
""" # noqa
class JujuTestsBase(utils.BaseTestCase):
def setUp(self):
super().setUp()
setup_config(PLUGIN_NAME='juju')
class TestJujuSummary(JujuTestsBase):
def test_summary_keys(self):
inst = summary.JujuSummary()
self.assertEqual(list(inst.output.keys()),
['charm-repo-info',
'charms',
'machine',
'services',
'units',
'version'])
def test_service_info(self):
expected = {'ps': ['jujud (1)'],
'systemd': {
'enabled': ['jujud-machine-1']}
}
inst = summary.JujuSummary()
self.assertEqual(self.part_output_to_actual(inst.output)['services'],
expected)
def test_machine_info(self):
inst = summary.JujuSummary()
self.assertTrue(inst.plugin_runnable)
actual = self.part_output_to_actual(inst.output)
self.assertEqual(actual['version'], '2.9.22')
self.assertEqual(actual['machine'], '1')
@mock.patch('hotsos.core.plugins.juju.JujuMachine')
def test_get_lxd_machine_info(self, mock_machine):
mock_machine.return_value = mock.MagicMock()
mock_machine.return_value.id = '0-lxd-11'
mock_machine.return_value.version = '2.9.9'
inst = summary.JujuSummary()
actual = self.part_output_to_actual(inst.output)
self.assertEqual(actual['version'], '2.9.9')
self.assertEqual(actual['machine'], '0-lxd-11')
def test_charm_versions(self):
expected = ['ceph-osd-508', 'neutron-openvswitch-457',
'nova-compute-589']
inst = summary.JujuSummary()
self.assertEqual(self.part_output_to_actual(inst.output)['charms'],
expected)
def test_get_unit_info(self):
expected = {'local': ['ceph-osd-0', 'neutron-openvswitch-1',
'nova-compute-0']}
inst = summary.JujuSummary()
self.assertEqual(self.part_output_to_actual(inst.output)['units'],
expected)
class TestJujuScenarios(JujuTestsBase):
@mock.patch('hotsos.core.ycheck.engine.YDefsLoader._is_def',
new=utils.is_def_filter('juju_core_bugs.yaml'))
@mock.patch('hotsos.core.ycheck.engine.properties.CLIHelper')
def test_1852502(self, mock_helper):
mock_helper.return_value = mock.MagicMock()
mock_helper.return_value.journalctl.return_value = \
JOURNALCTL_CAPPEDPOSITIONLOST.splitlines(keepends=True)
YScenarioChecker()()
mock_helper.return_value.journalctl.assert_called_with(
unit='juju-db')
msg_1852502 = ('known mongodb bug identified - '
'https://jira.mongodb.org/browse/TOOLS-1636 '
'Workaround is to pass --no-logs to juju '
'create-backup. This is an issue only with Mongo '
'3. Mongo 4 does not have this issue. Upstream is '
'working on migrating to Mongo 4 in the Juju 3.0 '
'release.')
expected = {'bugs-detected':
[{'id': 'https://bugs.launchpad.net/bugs/1852502',
'desc': msg_1852502,
'origin': 'juju.01part'}]}
self.assertEqual(KnownBugsStore().load(), expected)
@mock.patch('hotsos.core.ycheck.engine.YDefsLoader._is_def',
new=utils.is_def_filter('juju_core_bugs.yaml'))
def test_1910958(self):
with tempfile.TemporaryDirectory() as dtmp:
setup_config(DATA_ROOT=dtmp)
logfile = os.path.join(dtmp,
'var/log/juju/unit-rabbitmq-server-0.log')
os.makedirs(os.path.dirname(logfile))
with open(logfile, 'w') as fd:
fd.write(RABBITMQ_CHARM_LOGS)
YScenarioChecker()()
expected = {'bugs-detected':
[{'id': 'https://bugs.launchpad.net/bugs/1910958',
'desc':
('Unit unit-rabbitmq-server-0 failed to start due '
'to members in relation 236 that cannot be '
'removed.'),
'origin': 'juju.01part'}]}
self.assertEqual(KnownBugsStore().load(), expected)
@mock.patch('hotsos.core.ycheck.engine.YDefsLoader._is_def',
new=utils.is_def_filter('jujud_checks.yaml'))
@mock.patch('hotsos.core.host_helpers.systemd.ServiceChecksBase.processes',
{})
def test_jujud_checks(self):
YScenarioChecker()()
msg = ('No jujud processes found running on this host but it seems '
'there should be since Juju is installed.')
issues = list(IssuesStore().load().values())[0]
self.assertEqual([issue['desc'] for issue in issues], [msg])
@mock.patch('hotsos.core.ycheck.engine.properties.CLIHelper')
@mock.patch('hotsos.core.ycheck.engine.YDefsLoader._is_def',
new=utils.is_def_filter('charm_checks.yaml'))
def test_unit_checks(self, mock_cli):
mock_cli.return_value = mock.MagicMock()
with tempfile.TemporaryDirectory() as dtmp:
setup_config(DATA_ROOT=dtmp)
logfile = os.path.join(dtmp,
'var/log/juju/unit-keystone-2.log')
os.makedirs(os.path.dirname(logfile))
with open(logfile, 'w') as fd:
fd.write(UNIT_LEADERSHIP_ERROR)
# first try outside age limit
mock_cli.return_value.date.return_value = "2021-09-25 00:00:00"
YScenarioChecker()()
self.assertEqual(IssuesStore().load(), {})
# then within
mock_cli.return_value.date.return_value = "2021-09-17 00:00:00"
YScenarioChecker()()
msg = ("Juju unit(s) 'keystone' are showing leadership errors in "
"their logs from the last 7 days. Please investigate.")
issues = list(IssuesStore().load().values())[0]
self.assertEqual([issue['desc'] for issue in issues], [msg])
| <filename>tests/unit/test_juju.py
import os
import tempfile
import mock
from . import utils
from hotsos.core.config import setup_config
from hotsos.core.ycheck.scenarios import YScenarioChecker
from hotsos.core.issues.utils import KnownBugsStore, IssuesStore
from hotsos.plugin_extensions.juju import summary
JOURNALCTL_CAPPEDPOSITIONLOST = """
Dec 21 14:07:53 juju-1 mongod.37017[17873]: [replication-18] CollectionCloner ns:juju.txns.log finished cloning with status: QueryPlanKilled: PlanExecutor killed: CappedPositionLost: CollectionScan died due to position in capped collection being deleted. Last seen record id: RecordId(204021366)
Dec 21 14:07:53 juju-1 mongod.37017[17873]: [replication-18] collection clone for 'juju.txns.log' failed due to QueryPlanKilled: While cloning collection 'juju.txns.log' there was an error 'PlanExecutor killed: CappedPositionLost: CollectionScan died due to position in capped collection being deleted. Last seen record id: RecordId(204021366)'
""" # noqa
RABBITMQ_CHARM_LOGS = """
2021-02-17 08:18:44 ERROR juju.worker.dependency engine.go:671 "uniter" manifold worker returned unexpected error: failed to initialize uniter for "unit-rabbitmq-server-0": cannot create relation state tracker: cannot remove persisted state, relation 236 has members
2021-02-17 08:20:34 ERROR juju.worker.dependency engine.go:671 "uniter" manifold worker returned unexpected error: failed to initialize uniter for "unit-rabbitmq-server-0": cannot create relation state tracker: cannot remove persisted state, relation 236 has members
""" # noqa
UNIT_LEADERSHIP_ERROR = """
2021-09-16 10:28:25 WARNING leader-elected ERROR cannot write leadership settings: cannot write settings: failed to merge leadership settings: application "keystone": prerequisites failed: "keystone/2" is not leader of "keystone"
2021-09-16 10:28:47 WARNING leader-elected ERROR cannot write leadership settings: cannot write settings: failed to merge leadership settings: application "keystone": prerequisites failed: "keystone/2" is not leader of "keystone"
2021-09-16 10:29:06 WARNING leader-elected ERROR cannot write leadership settings: cannot write settings: failed to merge leadership settings: application "keystone": prerequisites failed: "keystone/2" is not leader of "keystone"
2021-09-16 10:29:53 WARNING leader-elected ERROR cannot write leadership settings: cannot write settings: failed to merge leadership settings: application "keystone": prerequisites failed: "keystone/2" is not leader of "keystone"
2021-09-16 10:30:41 WARNING leader-elected ERROR cannot write leadership settings: cannot write settings: failed to merge leadership settings: application "keystone": prerequisites failed: "keystone/2" is not leader of "keystone"
""" # noqa
class JujuTestsBase(utils.BaseTestCase):
def setUp(self):
super().setUp()
setup_config(PLUGIN_NAME='juju')
class TestJujuSummary(JujuTestsBase):
def test_summary_keys(self):
inst = summary.JujuSummary()
self.assertEqual(list(inst.output.keys()),
['charm-repo-info',
'charms',
'machine',
'services',
'units',
'version'])
def test_service_info(self):
expected = {'ps': ['jujud (1)'],
'systemd': {
'enabled': ['jujud-machine-1']}
}
inst = summary.JujuSummary()
self.assertEqual(self.part_output_to_actual(inst.output)['services'],
expected)
def test_machine_info(self):
inst = summary.JujuSummary()
self.assertTrue(inst.plugin_runnable)
actual = self.part_output_to_actual(inst.output)
self.assertEqual(actual['version'], '2.9.22')
self.assertEqual(actual['machine'], '1')
@mock.patch('hotsos.core.plugins.juju.JujuMachine')
def test_get_lxd_machine_info(self, mock_machine):
mock_machine.return_value = mock.MagicMock()
mock_machine.return_value.id = '0-lxd-11'
mock_machine.return_value.version = '2.9.9'
inst = summary.JujuSummary()
actual = self.part_output_to_actual(inst.output)
self.assertEqual(actual['version'], '2.9.9')
self.assertEqual(actual['machine'], '0-lxd-11')
def test_charm_versions(self):
expected = ['ceph-osd-508', 'neutron-openvswitch-457',
'nova-compute-589']
inst = summary.JujuSummary()
self.assertEqual(self.part_output_to_actual(inst.output)['charms'],
expected)
def test_get_unit_info(self):
expected = {'local': ['ceph-osd-0', 'neutron-openvswitch-1',
'nova-compute-0']}
inst = summary.JujuSummary()
self.assertEqual(self.part_output_to_actual(inst.output)['units'],
expected)
class TestJujuScenarios(JujuTestsBase):
@mock.patch('hotsos.core.ycheck.engine.YDefsLoader._is_def',
new=utils.is_def_filter('juju_core_bugs.yaml'))
@mock.patch('hotsos.core.ycheck.engine.properties.CLIHelper')
def test_1852502(self, mock_helper):
mock_helper.return_value = mock.MagicMock()
mock_helper.return_value.journalctl.return_value = \
JOURNALCTL_CAPPEDPOSITIONLOST.splitlines(keepends=True)
YScenarioChecker()()
mock_helper.return_value.journalctl.assert_called_with(
unit='juju-db')
msg_1852502 = ('known mongodb bug identified - '
'https://jira.mongodb.org/browse/TOOLS-1636 '
'Workaround is to pass --no-logs to juju '
'create-backup. This is an issue only with Mongo '
'3. Mongo 4 does not have this issue. Upstream is '
'working on migrating to Mongo 4 in the Juju 3.0 '
'release.')
expected = {'bugs-detected':
[{'id': 'https://bugs.launchpad.net/bugs/1852502',
'desc': msg_1852502,
'origin': 'juju.01part'}]}
self.assertEqual(KnownBugsStore().load(), expected)
@mock.patch('hotsos.core.ycheck.engine.YDefsLoader._is_def',
new=utils.is_def_filter('juju_core_bugs.yaml'))
def test_1910958(self):
with tempfile.TemporaryDirectory() as dtmp:
setup_config(DATA_ROOT=dtmp)
logfile = os.path.join(dtmp,
'var/log/juju/unit-rabbitmq-server-0.log')
os.makedirs(os.path.dirname(logfile))
with open(logfile, 'w') as fd:
fd.write(RABBITMQ_CHARM_LOGS)
YScenarioChecker()()
expected = {'bugs-detected':
[{'id': 'https://bugs.launchpad.net/bugs/1910958',
'desc':
('Unit unit-rabbitmq-server-0 failed to start due '
'to members in relation 236 that cannot be '
'removed.'),
'origin': 'juju.01part'}]}
self.assertEqual(KnownBugsStore().load(), expected)
@mock.patch('hotsos.core.ycheck.engine.YDefsLoader._is_def',
new=utils.is_def_filter('jujud_checks.yaml'))
@mock.patch('hotsos.core.host_helpers.systemd.ServiceChecksBase.processes',
{})
def test_jujud_checks(self):
YScenarioChecker()()
msg = ('No jujud processes found running on this host but it seems '
'there should be since Juju is installed.')
issues = list(IssuesStore().load().values())[0]
self.assertEqual([issue['desc'] for issue in issues], [msg])
@mock.patch('hotsos.core.ycheck.engine.properties.CLIHelper')
@mock.patch('hotsos.core.ycheck.engine.YDefsLoader._is_def',
new=utils.is_def_filter('charm_checks.yaml'))
def test_unit_checks(self, mock_cli):
mock_cli.return_value = mock.MagicMock()
with tempfile.TemporaryDirectory() as dtmp:
setup_config(DATA_ROOT=dtmp)
logfile = os.path.join(dtmp,
'var/log/juju/unit-keystone-2.log')
os.makedirs(os.path.dirname(logfile))
with open(logfile, 'w') as fd:
fd.write(UNIT_LEADERSHIP_ERROR)
# first try outside age limit
mock_cli.return_value.date.return_value = "2021-09-25 00:00:00"
YScenarioChecker()()
self.assertEqual(IssuesStore().load(), {})
# then within
mock_cli.return_value.date.return_value = "2021-09-17 00:00:00"
YScenarioChecker()()
msg = ("Juju unit(s) 'keystone' are showing leadership errors in "
"their logs from the last 7 days. Please investigate.")
issues = list(IssuesStore().load().values())[0]
self.assertEqual([issue['desc'] for issue in issues], [msg])
| en | 0.877132 | Dec 21 14:07:53 juju-1 mongod.37017[17873]: [replication-18] CollectionCloner ns:juju.txns.log finished cloning with status: QueryPlanKilled: PlanExecutor killed: CappedPositionLost: CollectionScan died due to position in capped collection being deleted. Last seen record id: RecordId(204021366) Dec 21 14:07:53 juju-1 mongod.37017[17873]: [replication-18] collection clone for 'juju.txns.log' failed due to QueryPlanKilled: While cloning collection 'juju.txns.log' there was an error 'PlanExecutor killed: CappedPositionLost: CollectionScan died due to position in capped collection being deleted. Last seen record id: RecordId(204021366)' # noqa 2021-02-17 08:18:44 ERROR juju.worker.dependency engine.go:671 "uniter" manifold worker returned unexpected error: failed to initialize uniter for "unit-rabbitmq-server-0": cannot create relation state tracker: cannot remove persisted state, relation 236 has members 2021-02-17 08:20:34 ERROR juju.worker.dependency engine.go:671 "uniter" manifold worker returned unexpected error: failed to initialize uniter for "unit-rabbitmq-server-0": cannot create relation state tracker: cannot remove persisted state, relation 236 has members # noqa 2021-09-16 10:28:25 WARNING leader-elected ERROR cannot write leadership settings: cannot write settings: failed to merge leadership settings: application "keystone": prerequisites failed: "keystone/2" is not leader of "keystone" 2021-09-16 10:28:47 WARNING leader-elected ERROR cannot write leadership settings: cannot write settings: failed to merge leadership settings: application "keystone": prerequisites failed: "keystone/2" is not leader of "keystone" 2021-09-16 10:29:06 WARNING leader-elected ERROR cannot write leadership settings: cannot write settings: failed to merge leadership settings: application "keystone": prerequisites failed: "keystone/2" is not leader of "keystone" 2021-09-16 10:29:53 WARNING leader-elected ERROR cannot write leadership settings: cannot write settings: failed to merge leadership settings: application "keystone": prerequisites failed: "keystone/2" is not leader of "keystone" 2021-09-16 10:30:41 WARNING leader-elected ERROR cannot write leadership settings: cannot write settings: failed to merge leadership settings: application "keystone": prerequisites failed: "keystone/2" is not leader of "keystone" # noqa # first try outside age limit # then within | 1.663707 | 2 |
tools/SPGAN/main.py | by-liu/OpenUnReID | 0 | 10455 | import argparse
import collections
import shutil
import sys
import time
from datetime import timedelta
from pathlib import Path
import torch
from torch.nn.parallel import DataParallel, DistributedDataParallel
try:
# PyTorch >= 1.6 supports mixed precision training
from torch.cuda.amp import autocast
amp_support = True
except:
amp_support = False
from openunreid.apis import GANBaseRunner, set_random_seed, infer_gan
from openunreid.core.solvers import build_lr_scheduler, build_optimizer
from openunreid.data import (
build_test_dataloader,
build_train_dataloader,
build_val_dataloader,
)
from openunreid.models import build_gan_model
from openunreid.models.losses import build_loss
from openunreid.models.utils.extract import extract_features
from openunreid.utils.config import (
cfg,
cfg_from_list,
cfg_from_yaml_file,
log_config_to_file,
)
from openunreid.utils.dist_utils import init_dist, synchronize
from openunreid.utils.file_utils import mkdir_if_missing
from openunreid.utils.logger import Logger
class SPGANRunner(GANBaseRunner):
def train_step(self, iter, batch):
data_src, data_tgt = batch[0], batch[1]
self.real_A = data_src['img'].cuda()
self.real_B = data_tgt['img'].cuda()
# Forward
self.fake_B = self.model['G_A'](self.real_A) # G_A(A)
self.fake_A = self.model['G_B'](self.real_B) # G_B(B)
self.rec_A = self.model['G_B'](self.fake_B) # G_B(G_A(A))
self.rec_B = self.model['G_A'](self.fake_A) # G_A(G_B(B))
# G_A and G_B
if iter % 2 == 0:
self.set_requires_grad([self.model['D_A'], self.model['D_B'], self.model['Metric']], False) # save memory
if self.scaler is None:
self.optimizer['G'].zero_grad()
else:
with autocast(enabled=False):
self.optimizer['G'].zero_grad()
if self._epoch > 1:
self.backward_G(retain_graph=True)
self.backward_GM()
else:
self.backward_G()
if self.scaler is None:
self.optimizer['G'].step()
else:
with autocast(enabled=False):
self.scaler.step(self.optimizer['G'])
# SiaNet for SPGAN
if self._epoch > 0:
self.set_requires_grad([self.model['Metric']], True)
if self.scaler is None:
self.optimizer['Metric'].zero_grad()
else:
with autocast(enabled=False):
self.optimizer['Metric'].zero_grad()
self.backward_M()
if self.scaler is None:
self.optimizer['Metric'].step()
else:
with autocast(enabled=False):
self.scaler.step(self.optimizer['Metric'])
# D_A and D_B
self.set_requires_grad([self.model['D_A'], self.model['D_B']], True)
# self.optimizer['D'].zero_grad()
# self.backward_D()
# self.optimizer['D'].step()
if self.scaler is None:
self.optimizer['D'].zero_grad()
else:
with autocast(enabled=False):
self.optimizer['D'].zero_grad()
self.backward_D()
if self.scaler is None:
self.optimizer['D'].step()
else:
with autocast(enabled=False):
self.scaler.step(self.optimizer['D'])
# save translated images
if self._rank == 0:
self.save_imgs(['real_A', 'real_B', 'fake_A', 'fake_B', 'rec_A', 'rec_B'])
return 0
def backward_GM(self):
real_A_metric = self.model['Metric'](self.real_A)
real_B_metric = self.model['Metric'](self.real_B)
fake_A_metric = self.model['Metric'](self.fake_A)
fake_B_metric = self.model['Metric'](self.fake_B)
# positive pairs
loss_pos = self.criterions['sia_G'](real_A_metric, fake_B_metric, 1) + \
self.criterions['sia_G'](real_B_metric, fake_A_metric, 1)
# negative pairs
loss_neg = self.criterions['sia_G'](fake_B_metric, real_B_metric, 0) + \
self.criterions['sia_G'](fake_A_metric, real_A_metric, 0)
loss_M = (loss_pos + 0.5 * loss_neg) / 4.0
loss = loss_M * self.cfg.TRAIN.LOSS.losses['sia_G']
if self.scaler is None:
loss.backward()
else:
with autocast(enabled=False):
self.scaler.scale(loss).backward()
meters = {'sia_G': loss_M.item()}
self.train_progress.update(meters)
def backward_M(self):
real_A_metric = self.model['Metric'](self.real_A)
real_B_metric = self.model['Metric'](self.real_B)
fake_A_metric = self.model['Metric'](self.fake_A.detach())
fake_B_metric = self.model['Metric'](self.fake_B.detach())
# positive pairs
loss_pos = self.criterions['sia_M'](real_A_metric, fake_B_metric, 1) + \
self.criterions['sia_M'](real_B_metric, fake_A_metric, 1)
# negative pairs
loss_neg = self.criterions['sia_M'](real_A_metric, real_B_metric, 0)
loss_M = (loss_pos + 2 * loss_neg) / 3.0
loss = loss_M * self.cfg.TRAIN.LOSS.losses['sia_M']
if self.scaler is None:
loss.backward()
else:
with autocast(enabled=False):
self.scaler.scale(loss).backward()
meters = {'sia_M': loss_M.item()}
self.train_progress.update(meters)
def parge_config():
parser = argparse.ArgumentParser(description="SPGAN training")
parser.add_argument("config", help="train config file path")
parser.add_argument(
"--work-dir", help="the dir to save logs and models", default=""
)
parser.add_argument("--resume-from", help="the checkpoint file to resume from")
parser.add_argument(
"--launcher",
type=str,
choices=["none", "pytorch", "slurm"],
default="none",
help="job launcher",
)
parser.add_argument("--tcp-port", type=str, default="5017")
parser.add_argument(
"--set",
dest="set_cfgs",
default=None,
nargs=argparse.REMAINDER,
help="set extra config keys if needed",
)
args = parser.parse_args()
cfg_from_yaml_file(args.config, cfg)
assert len(list(cfg.TRAIN.datasets.keys()))==2, \
"the number of datasets for domain-translation training should be two"
cfg.launcher = args.launcher
cfg.tcp_port = args.tcp_port
if not args.work_dir:
args.work_dir = Path(args.config).stem
cfg.work_dir = cfg.LOGS_ROOT / args.work_dir
mkdir_if_missing(cfg.work_dir)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs, cfg)
shutil.copy(args.config, cfg.work_dir / "config.yaml")
return args, cfg
def main():
start_time = time.monotonic()
# init distributed training
args, cfg = parge_config()
dist = init_dist(cfg)
set_random_seed(cfg.TRAIN.seed, cfg.TRAIN.deterministic)
synchronize()
# init logging file
logger = Logger(cfg.work_dir / 'log.txt', debug=False)
sys.stdout = logger
print("==========\nArgs:{}\n==========".format(args))
log_config_to_file(cfg)
# build train loader
train_loader, _ = build_train_dataloader(cfg, joint=False)
# build model
model = build_gan_model(cfg)
for key in model.keys():
model[key].cuda()
if dist:
ddp_cfg = {
"device_ids": [cfg.gpu],
"output_device": cfg.gpu,
"find_unused_parameters": True,
}
for key in model.keys():
model[key] = torch.nn.parallel.DistributedDataParallel(model[key], **ddp_cfg)
elif cfg.total_gpus > 1:
for key in model.keys():
model[key] = torch.nn.DataParallel(model[key])
# build optimizer
optimizer = {}
optimizer['G'] = build_optimizer([model['G_A'], model['G_B']], **cfg.TRAIN.OPTIM)
optimizer['D'] = build_optimizer([model['D_A'], model['D_B']], **cfg.TRAIN.OPTIM)
optimizer['Metric'] = build_optimizer([model['Metric']], **cfg.TRAIN.OPTIM)
# build lr_scheduler
if cfg.TRAIN.SCHEDULER.lr_scheduler is not None:
lr_scheduler = [build_lr_scheduler(optimizer[key], **cfg.TRAIN.SCHEDULER) \
for key in optimizer.keys()]
else:
lr_scheduler = None
# build loss functions
criterions = build_loss(cfg.TRAIN.LOSS, cuda=True)
# build runner
runner = SPGANRunner(
cfg,
model,
optimizer,
criterions,
train_loader,
lr_scheduler=lr_scheduler,
meter_formats={"Time": ":.3f"}
)
# resume
if args.resume_from:
runner.resume(args.resume_from)
# start training
runner.run()
# load the latest model
# runner.resume(cfg.work_dir)
# final inference
test_loader, _ = build_val_dataloader(
cfg,
for_clustering=True,
all_datasets=True
)
# source to target
infer_gan(
cfg,
model['G_A'],
test_loader[0],
dataset_name=list(cfg.TRAIN.datasets.keys())[0]
)
# target to source
infer_gan(
cfg,
model['G_B'],
test_loader[1],
dataset_name=list(cfg.TRAIN.datasets.keys())[1]
)
# print time
end_time = time.monotonic()
print("Total running time: ", timedelta(seconds=end_time - start_time))
if __name__ == '__main__':
main()
| import argparse
import collections
import shutil
import sys
import time
from datetime import timedelta
from pathlib import Path
import torch
from torch.nn.parallel import DataParallel, DistributedDataParallel
try:
# PyTorch >= 1.6 supports mixed precision training
from torch.cuda.amp import autocast
amp_support = True
except:
amp_support = False
from openunreid.apis import GANBaseRunner, set_random_seed, infer_gan
from openunreid.core.solvers import build_lr_scheduler, build_optimizer
from openunreid.data import (
build_test_dataloader,
build_train_dataloader,
build_val_dataloader,
)
from openunreid.models import build_gan_model
from openunreid.models.losses import build_loss
from openunreid.models.utils.extract import extract_features
from openunreid.utils.config import (
cfg,
cfg_from_list,
cfg_from_yaml_file,
log_config_to_file,
)
from openunreid.utils.dist_utils import init_dist, synchronize
from openunreid.utils.file_utils import mkdir_if_missing
from openunreid.utils.logger import Logger
class SPGANRunner(GANBaseRunner):
def train_step(self, iter, batch):
data_src, data_tgt = batch[0], batch[1]
self.real_A = data_src['img'].cuda()
self.real_B = data_tgt['img'].cuda()
# Forward
self.fake_B = self.model['G_A'](self.real_A) # G_A(A)
self.fake_A = self.model['G_B'](self.real_B) # G_B(B)
self.rec_A = self.model['G_B'](self.fake_B) # G_B(G_A(A))
self.rec_B = self.model['G_A'](self.fake_A) # G_A(G_B(B))
# G_A and G_B
if iter % 2 == 0:
self.set_requires_grad([self.model['D_A'], self.model['D_B'], self.model['Metric']], False) # save memory
if self.scaler is None:
self.optimizer['G'].zero_grad()
else:
with autocast(enabled=False):
self.optimizer['G'].zero_grad()
if self._epoch > 1:
self.backward_G(retain_graph=True)
self.backward_GM()
else:
self.backward_G()
if self.scaler is None:
self.optimizer['G'].step()
else:
with autocast(enabled=False):
self.scaler.step(self.optimizer['G'])
# SiaNet for SPGAN
if self._epoch > 0:
self.set_requires_grad([self.model['Metric']], True)
if self.scaler is None:
self.optimizer['Metric'].zero_grad()
else:
with autocast(enabled=False):
self.optimizer['Metric'].zero_grad()
self.backward_M()
if self.scaler is None:
self.optimizer['Metric'].step()
else:
with autocast(enabled=False):
self.scaler.step(self.optimizer['Metric'])
# D_A and D_B
self.set_requires_grad([self.model['D_A'], self.model['D_B']], True)
# self.optimizer['D'].zero_grad()
# self.backward_D()
# self.optimizer['D'].step()
if self.scaler is None:
self.optimizer['D'].zero_grad()
else:
with autocast(enabled=False):
self.optimizer['D'].zero_grad()
self.backward_D()
if self.scaler is None:
self.optimizer['D'].step()
else:
with autocast(enabled=False):
self.scaler.step(self.optimizer['D'])
# save translated images
if self._rank == 0:
self.save_imgs(['real_A', 'real_B', 'fake_A', 'fake_B', 'rec_A', 'rec_B'])
return 0
def backward_GM(self):
real_A_metric = self.model['Metric'](self.real_A)
real_B_metric = self.model['Metric'](self.real_B)
fake_A_metric = self.model['Metric'](self.fake_A)
fake_B_metric = self.model['Metric'](self.fake_B)
# positive pairs
loss_pos = self.criterions['sia_G'](real_A_metric, fake_B_metric, 1) + \
self.criterions['sia_G'](real_B_metric, fake_A_metric, 1)
# negative pairs
loss_neg = self.criterions['sia_G'](fake_B_metric, real_B_metric, 0) + \
self.criterions['sia_G'](fake_A_metric, real_A_metric, 0)
loss_M = (loss_pos + 0.5 * loss_neg) / 4.0
loss = loss_M * self.cfg.TRAIN.LOSS.losses['sia_G']
if self.scaler is None:
loss.backward()
else:
with autocast(enabled=False):
self.scaler.scale(loss).backward()
meters = {'sia_G': loss_M.item()}
self.train_progress.update(meters)
def backward_M(self):
real_A_metric = self.model['Metric'](self.real_A)
real_B_metric = self.model['Metric'](self.real_B)
fake_A_metric = self.model['Metric'](self.fake_A.detach())
fake_B_metric = self.model['Metric'](self.fake_B.detach())
# positive pairs
loss_pos = self.criterions['sia_M'](real_A_metric, fake_B_metric, 1) + \
self.criterions['sia_M'](real_B_metric, fake_A_metric, 1)
# negative pairs
loss_neg = self.criterions['sia_M'](real_A_metric, real_B_metric, 0)
loss_M = (loss_pos + 2 * loss_neg) / 3.0
loss = loss_M * self.cfg.TRAIN.LOSS.losses['sia_M']
if self.scaler is None:
loss.backward()
else:
with autocast(enabled=False):
self.scaler.scale(loss).backward()
meters = {'sia_M': loss_M.item()}
self.train_progress.update(meters)
def parge_config():
parser = argparse.ArgumentParser(description="SPGAN training")
parser.add_argument("config", help="train config file path")
parser.add_argument(
"--work-dir", help="the dir to save logs and models", default=""
)
parser.add_argument("--resume-from", help="the checkpoint file to resume from")
parser.add_argument(
"--launcher",
type=str,
choices=["none", "pytorch", "slurm"],
default="none",
help="job launcher",
)
parser.add_argument("--tcp-port", type=str, default="5017")
parser.add_argument(
"--set",
dest="set_cfgs",
default=None,
nargs=argparse.REMAINDER,
help="set extra config keys if needed",
)
args = parser.parse_args()
cfg_from_yaml_file(args.config, cfg)
assert len(list(cfg.TRAIN.datasets.keys()))==2, \
"the number of datasets for domain-translation training should be two"
cfg.launcher = args.launcher
cfg.tcp_port = args.tcp_port
if not args.work_dir:
args.work_dir = Path(args.config).stem
cfg.work_dir = cfg.LOGS_ROOT / args.work_dir
mkdir_if_missing(cfg.work_dir)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs, cfg)
shutil.copy(args.config, cfg.work_dir / "config.yaml")
return args, cfg
def main():
start_time = time.monotonic()
# init distributed training
args, cfg = parge_config()
dist = init_dist(cfg)
set_random_seed(cfg.TRAIN.seed, cfg.TRAIN.deterministic)
synchronize()
# init logging file
logger = Logger(cfg.work_dir / 'log.txt', debug=False)
sys.stdout = logger
print("==========\nArgs:{}\n==========".format(args))
log_config_to_file(cfg)
# build train loader
train_loader, _ = build_train_dataloader(cfg, joint=False)
# build model
model = build_gan_model(cfg)
for key in model.keys():
model[key].cuda()
if dist:
ddp_cfg = {
"device_ids": [cfg.gpu],
"output_device": cfg.gpu,
"find_unused_parameters": True,
}
for key in model.keys():
model[key] = torch.nn.parallel.DistributedDataParallel(model[key], **ddp_cfg)
elif cfg.total_gpus > 1:
for key in model.keys():
model[key] = torch.nn.DataParallel(model[key])
# build optimizer
optimizer = {}
optimizer['G'] = build_optimizer([model['G_A'], model['G_B']], **cfg.TRAIN.OPTIM)
optimizer['D'] = build_optimizer([model['D_A'], model['D_B']], **cfg.TRAIN.OPTIM)
optimizer['Metric'] = build_optimizer([model['Metric']], **cfg.TRAIN.OPTIM)
# build lr_scheduler
if cfg.TRAIN.SCHEDULER.lr_scheduler is not None:
lr_scheduler = [build_lr_scheduler(optimizer[key], **cfg.TRAIN.SCHEDULER) \
for key in optimizer.keys()]
else:
lr_scheduler = None
# build loss functions
criterions = build_loss(cfg.TRAIN.LOSS, cuda=True)
# build runner
runner = SPGANRunner(
cfg,
model,
optimizer,
criterions,
train_loader,
lr_scheduler=lr_scheduler,
meter_formats={"Time": ":.3f"}
)
# resume
if args.resume_from:
runner.resume(args.resume_from)
# start training
runner.run()
# load the latest model
# runner.resume(cfg.work_dir)
# final inference
test_loader, _ = build_val_dataloader(
cfg,
for_clustering=True,
all_datasets=True
)
# source to target
infer_gan(
cfg,
model['G_A'],
test_loader[0],
dataset_name=list(cfg.TRAIN.datasets.keys())[0]
)
# target to source
infer_gan(
cfg,
model['G_B'],
test_loader[1],
dataset_name=list(cfg.TRAIN.datasets.keys())[1]
)
# print time
end_time = time.monotonic()
print("Total running time: ", timedelta(seconds=end_time - start_time))
if __name__ == '__main__':
main()
| en | 0.529551 | # PyTorch >= 1.6 supports mixed precision training # Forward # G_A(A) # G_B(B) # G_B(G_A(A)) # G_A(G_B(B)) # G_A and G_B # save memory # SiaNet for SPGAN # D_A and D_B # self.optimizer['D'].zero_grad() # self.backward_D() # self.optimizer['D'].step() # save translated images # positive pairs # negative pairs # positive pairs # negative pairs # init distributed training # init logging file # build train loader # build model # build optimizer # build lr_scheduler # build loss functions # build runner # resume # start training # load the latest model # runner.resume(cfg.work_dir) # final inference # source to target # target to source # print time | 1.963891 | 2 |
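# A self-contained sketch of the pairwise ("SiaNet") term used in backward_M/backward_GM above.
# It only mirrors the call pattern criterions['sia_M'](feat_a, feat_b, label); the margin value
# and exact formulation of the real OpenUnReID criterion are assumptions, not the project's code.
import torch
import torch.nn.functional as F

def contrastive_pair_loss(feat_a, feat_b, label, margin=2.0):
    # label=1 pulls a positive pair together, label=0 pushes a negative pair apart up to `margin`
    d = F.pairwise_distance(feat_a, feat_b)
    pos = label * d.pow(2)
    neg = (1 - label) * torch.clamp(margin - d, min=0).pow(2)
    return (pos + neg).mean()

# e.g. loss_pos = contrastive_pair_loss(real_A_metric, fake_B_metric, 1)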
utility/data_download.py | LatvianPython/wind-experience | 2 | 10456 | import logging
import requests
import multiprocessing
import pathlib
from typing import List
from typing import Optional
from typing import Tuple
from typing import Dict
from joblib import delayed
from joblib import Parallel
from datetime import date
from datetime import timedelta
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def next_date(start_date=date(2018, 3, 1)):
days_to_download = abs(start_date - date.today()).days - 5
for date_offset in range(days_to_download):
yield start_date
start_date = start_date + timedelta(days=1)
def download_all(inputs: List[Tuple[pathlib.Path, str]], cookies: Optional[Dict]):
session = requests.session()
inputs[0][0].parent.mkdir(parents=True, exist_ok=True)
def download_single_link(file_path: pathlib.Path, url):
thread_nr = multiprocessing.current_process().name
thread_nr = thread_nr[thread_nr.rfind('-') + 1:]
file_name = file_path.stem
if file_path.is_file():
logger.info('{} {} already exists'.format(thread_nr, file_name))
return
try:
response = session.get(url=url, cookies=cookies)
except TimeoutError:
logger.critical('{} Timeout Error'.format(thread_nr))
return
content = response.content.decode('utf-8')
if response.status_code != 200:
            logger.critical('{} {} {}'.format(thread_nr, url, response.status_code))
            logger.critical('{} {}'.format(thread_nr, content))
return
else:
logger.info('{} {} {} OK'.format(thread_nr, file_name, response.status_code))
with open(str(file_path), mode='w', encoding='utf-8') as output_file:
output_file.write(content)
num_cores = multiprocessing.cpu_count()
Parallel(n_jobs=num_cores)(delayed(download_single_link)(*j) for j in inputs)
| import logging
import requests
import multiprocessing
import pathlib
from typing import List
from typing import Optional
from typing import Tuple
from typing import Dict
from joblib import delayed
from joblib import Parallel
from datetime import date
from datetime import timedelta
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def next_date(start_date=date(2018, 3, 1)):
days_to_download = abs(start_date - date.today()).days - 5
for date_offset in range(days_to_download):
yield start_date
start_date = start_date + timedelta(days=1)
def download_all(inputs: List[Tuple[pathlib.Path, str]], cookies: Optional[Dict]):
session = requests.session()
inputs[0][0].parent.mkdir(parents=True, exist_ok=True)
def download_single_link(file_path: pathlib.Path, url):
thread_nr = multiprocessing.current_process().name
thread_nr = thread_nr[thread_nr.rfind('-') + 1:]
file_name = file_path.stem
if file_path.is_file():
logger.info('{} {} already exists'.format(thread_nr, file_name))
return
try:
response = session.get(url=url, cookies=cookies)
except TimeoutError:
logger.critical('{} Timeout Error'.format(thread_nr))
return
content = response.content.decode('utf-8')
if response.status_code != 200:
            logger.critical('{} {} {}'.format(thread_nr, url, response.status_code))
            logger.critical('{} {}'.format(thread_nr, content))
return
else:
logger.info('{} {} {} OK'.format(thread_nr, file_name, response.status_code))
with open(str(file_path), mode='w', encoding='utf-8') as output_file:
output_file.write(content)
num_cores = multiprocessing.cpu_count()
Parallel(n_jobs=num_cores)(delayed(download_single_link)(*j) for j in inputs)
| none | 1 | 2.518842 | 3 |
|
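# A hypothetical usage sketch for the downloader above; the URL pattern, file layout and
# cookie handling are placeholders, only next_date() and download_all() come from this module.
import pathlib

target_dir = pathlib.Path('downloads')
inputs = [
    (target_dir / '{:%Y-%m-%d}.csv'.format(day),
     'https://example.com/export?date={:%Y-%m-%d}'.format(day))
    for day in next_date()
]
download_all(inputs, cookies=None)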
model/net_qspline_A.py | jercoco/QSQF | 0 | 10457 | <filename>model/net_qspline_A.py<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 21 19:52:22 2020
#Plan A
@author: 18096
"""
'''Defines the neural network, loss function and metrics'''
#from functools import reduce
import torch
import torch.nn as nn
from torch.nn.functional import pad
from torch.autograd import Variable
import logging
logger = logging.getLogger('DeepAR.Net')
class Net(nn.Module):
def __init__(self, params,device):
'''
We define a recurrent network that predicts the future values
of a time-dependent variable based on past inputs and covariates.
'''
super(Net, self).__init__()
self.params = params
self.device = device
self.lstm = nn.LSTM(input_size=params.lstm_input_size,
hidden_size=params.lstm_hidden_dim,
num_layers=params.lstm_layers,
bias=True,
batch_first=False,
dropout=params.lstm_dropout)
        # initialize LSTM forget gate bias to be 1 as recommended by
# http://proceedings.mlr.press/v37/jozefowicz15.pdf
for names in self.lstm._all_weights:
for name in filter(lambda n: "bias" in n, names):
bias = getattr(self.lstm, name)
n = bias.size(0)
start, end = n // 4, n // 2
bias.data[start:end].fill_(1.)
#Plan A:
#beta_01:[beta0,beta1]
self.beta_n1 = nn.Linear(
params.lstm_hidden_dim * params.lstm_layers, 1)
self.pre_beta_1 = nn.Linear(
params.lstm_hidden_dim * params.lstm_layers, 1)
self.pre_sigma = nn.Linear(
params.lstm_hidden_dim * params.lstm_layers, params.num_spline)
self.pre_gamma = nn.Linear(
params.lstm_hidden_dim * params.lstm_layers, params.num_spline)
# softmax to make sure Σu equals to 1
self.sigma = nn.Softmax(dim=1)
# softplus to make sure gamma is positive
self.gamma = nn.Softplus()
# softplus to make sure beta0 is positive
self.beta_1 = nn.Softplus()
def forward(self, x, hidden, cell):
_, (hidden, cell) = self.lstm(x, (hidden, cell))
# use h from all three layers to calculate mu and sigma
hidden_permute = \
hidden.permute(1, 2, 0).contiguous().view(hidden.shape[1], -1)
#Plan A:
beta_n1 = self.beta_n1(hidden_permute)
pre_beta_1 = self.pre_beta_1(hidden_permute)
beta_1 = self.beta_1(pre_beta_1)
beta_1=-beta_1
pre_sigma = self.pre_sigma(hidden_permute)
sigma = self.sigma(pre_sigma)
pre_gamma = self.pre_gamma(hidden_permute)
gamma = self.gamma(pre_gamma)
#Plan A:
return ((beta_n1,beta_1,sigma,torch.squeeze(gamma)),hidden,cell)
def init_hidden(self, input_size):
return torch.zeros(self.params.lstm_layers, input_size,
self.params.lstm_hidden_dim,
device=self.device)
def init_cell(self, input_size):
return torch.zeros(self.params.lstm_layers, input_size,
self.params.lstm_hidden_dim,
device=self.device)
def predict(self, x, hidden, cell, sampling=False):
"""
        generate samples by sampling from the predicted distribution
"""
batch_size = x.shape[1]
samples = torch.zeros(self.params.sample_times,batch_size,
self.params.pred_steps,
device=self.device)
for j in range(self.params.sample_times):
decoder_hidden = hidden
decoder_cell = cell
for t in range(self.params.pred_steps):
func_param,decoder_hidden,decoder_cell=\
self(x[self.params.pred_start+t].unsqueeze(0),
decoder_hidden,decoder_cell)
beta_n1,beta_1,sigma,gamma=func_param
                #pred_cdf is a uniform distribution
uniform = torch.distributions.uniform.Uniform(
torch.tensor([0.0], device=sigma.device),
torch.tensor([1.0], device=sigma.device))
pred_cdf=uniform.sample([batch_size])
beta_0=gamma[:,:1]-2*beta_1*sigma[:,:1]
beta_N=torch.cat((beta_n1,beta_0),dim=1)
beta=pad(gamma,(1,0))[:,:-1]
beta[:,0]=beta_0[:,0]
beta=(gamma-beta)/(2*sigma)
beta=beta-pad(beta,(1,0))[:,:-1]
beta[:,-1]=gamma[:,-1]-beta[:,:-1].sum(dim=1)
ksi=pad(torch.cumsum(sigma,dim=1),(1,0))[:,:-1]
indices=ksi<pred_cdf
pred=(beta_N*pad(pred_cdf,(1,0),value=1)).sum(dim=1)
pred=pred+((pred_cdf-ksi).pow(2)*beta*indices).sum(dim=1)
samples[j, :, t] = pred
                #the predicted value at t-1 is used as a covariate for t,t+1,...,t+lag
for lag in range(self.params.lag):
if t<self.params.pred_steps-lag-1:
x[self.params.pred_start+t+1,:,0]=pred
sample_mu = torch.mean(samples, dim=0) # mean or median ?
sample_std = samples.std(dim=0)
return samples, sample_mu, sample_std
def loss_fn(func_param, labels: Variable):
beta_n1,beta_1,sigma,gamma=func_param
beta_0=gamma[:,:1]-2*beta_1*sigma[:,:1]
beta_N=torch.cat((beta_n1,beta_0),dim=1)
beta=pad(gamma,(1,0))[:,:-1]
beta[:,0]=beta_0[:,0]
beta=(gamma-beta)/(2*sigma)
beta=beta-pad(beta,(1,0))[:,:-1]
beta[:,-1]=gamma[:,-1]-beta[:,:-1].sum(dim=1)
#calculate the maximum for each segment of the spline
ksi=torch.cumsum(sigma,dim=1)
df1=ksi.expand(sigma.shape[1],sigma.shape[0],sigma.shape[1]).T.clone()
df2=pad(ksi.T.unsqueeze(2),(1,0),'constant',value=1)
ksi=pad(ksi,(1,0))[:,:-1]
knots=df1-ksi
knots[knots<0]=0
knots=(df2*beta_N).sum(dim=2)+(knots.pow(2)*beta).sum(dim=2)
knots=pad(knots.T,(1,0))[:,:-1]#F(ksi_1~K)=0~max
diff=labels.view(-1,1)-knots
alpha_l=diff>0
alpha_A=torch.sum(alpha_l*beta,dim=1)
alpha_B=beta_N[:,1]-2*torch.sum(alpha_l*beta*ksi,dim=1)
alpha_C=beta_N[:,0]-labels+torch.sum(alpha_l*beta*ksi*ksi,dim=1)
#since A may be zero, roots can be from different methods.
not_zero=(alpha_A!=0)
alpha=torch.zeros_like(alpha_A)
#since there may be numerical calculation error,#0
idx=(alpha_B**2-4*alpha_A*alpha_C)<0#0
diff=diff.abs()
index=diff==(diff.min(dim=1)[0].view(-1,1))
index[~idx,:]=False
#index=diff.abs()<1e-4#0,1e-4 is a threshold
#idx=index.sum(dim=1)>0#0
alpha[idx]=ksi[index]#0
alpha[~not_zero]=-alpha_C[~not_zero]/alpha_B[~not_zero]
not_zero=~(~not_zero | idx)#0
delta=alpha_B[not_zero].pow(2)-4*alpha_A[not_zero]*alpha_C[not_zero]
alpha[not_zero]=(-alpha_B[not_zero]+torch.sqrt(delta))/(2*alpha_A[not_zero])
crps_1=labels*(2*alpha-1)
#lam2=lambda n:2*beta_N[:,n-1]*(1/n/(n+1)-alpha.pow(n)/n)
#crps_2=reduce(lambda a,b:a+b,[lam2(n) for n in range(1,2+1)])
crps_2=beta_N[:,0]*(1-2*alpha)+beta_N[:,1]*(1/3-alpha.pow(2))
crps_3=torch.sum(2*beta/((2+1)*(2+2))*(1-ksi).pow(2+2),dim=1)
crps_4=torch.sum(alpha_l*2*beta/(2+1)*(torch.unsqueeze(alpha,1)-ksi).pow(2+1),dim=1)
crps=crps_1+crps_2+crps_3-crps_4
crps = torch.mean(crps)
return crps
| <filename>model/net_qspline_A.py<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 21 19:52:22 2020
#Plan A
@author: 18096
"""
'''Defines the neural network, loss function and metrics'''
#from functools import reduce
import torch
import torch.nn as nn
from torch.nn.functional import pad
from torch.autograd import Variable
import logging
logger = logging.getLogger('DeepAR.Net')
class Net(nn.Module):
def __init__(self, params,device):
'''
We define a recurrent network that predicts the future values
of a time-dependent variable based on past inputs and covariates.
'''
super(Net, self).__init__()
self.params = params
self.device = device
self.lstm = nn.LSTM(input_size=params.lstm_input_size,
hidden_size=params.lstm_hidden_dim,
num_layers=params.lstm_layers,
bias=True,
batch_first=False,
dropout=params.lstm_dropout)
        # initialize LSTM forget gate bias to be 1 as recommended by
# http://proceedings.mlr.press/v37/jozefowicz15.pdf
for names in self.lstm._all_weights:
for name in filter(lambda n: "bias" in n, names):
bias = getattr(self.lstm, name)
n = bias.size(0)
start, end = n // 4, n // 2
bias.data[start:end].fill_(1.)
#Plan A:
#beta_01:[beta0,beta1]
self.beta_n1 = nn.Linear(
params.lstm_hidden_dim * params.lstm_layers, 1)
self.pre_beta_1 = nn.Linear(
params.lstm_hidden_dim * params.lstm_layers, 1)
self.pre_sigma = nn.Linear(
params.lstm_hidden_dim * params.lstm_layers, params.num_spline)
self.pre_gamma = nn.Linear(
params.lstm_hidden_dim * params.lstm_layers, params.num_spline)
# softmax to make sure Σu equals to 1
self.sigma = nn.Softmax(dim=1)
# softplus to make sure gamma is positive
self.gamma = nn.Softplus()
# softplus to make sure beta0 is positive
self.beta_1 = nn.Softplus()
def forward(self, x, hidden, cell):
_, (hidden, cell) = self.lstm(x, (hidden, cell))
# use h from all three layers to calculate mu and sigma
hidden_permute = \
hidden.permute(1, 2, 0).contiguous().view(hidden.shape[1], -1)
#Plan A:
beta_n1 = self.beta_n1(hidden_permute)
pre_beta_1 = self.pre_beta_1(hidden_permute)
beta_1 = self.beta_1(pre_beta_1)
beta_1=-beta_1
pre_sigma = self.pre_sigma(hidden_permute)
sigma = self.sigma(pre_sigma)
pre_gamma = self.pre_gamma(hidden_permute)
gamma = self.gamma(pre_gamma)
#Plan A:
return ((beta_n1,beta_1,sigma,torch.squeeze(gamma)),hidden,cell)
def init_hidden(self, input_size):
return torch.zeros(self.params.lstm_layers, input_size,
self.params.lstm_hidden_dim,
device=self.device)
def init_cell(self, input_size):
return torch.zeros(self.params.lstm_layers, input_size,
self.params.lstm_hidden_dim,
device=self.device)
def predict(self, x, hidden, cell, sampling=False):
"""
        generate samples by sampling from the predicted quantile-spline distribution
"""
batch_size = x.shape[1]
samples = torch.zeros(self.params.sample_times,batch_size,
self.params.pred_steps,
device=self.device)
for j in range(self.params.sample_times):
decoder_hidden = hidden
decoder_cell = cell
for t in range(self.params.pred_steps):
func_param,decoder_hidden,decoder_cell=\
self(x[self.params.pred_start+t].unsqueeze(0),
decoder_hidden,decoder_cell)
beta_n1,beta_1,sigma,gamma=func_param
                #pred_cdf is a uniform distribution
uniform = torch.distributions.uniform.Uniform(
torch.tensor([0.0], device=sigma.device),
torch.tensor([1.0], device=sigma.device))
pred_cdf=uniform.sample([batch_size])
beta_0=gamma[:,:1]-2*beta_1*sigma[:,:1]
beta_N=torch.cat((beta_n1,beta_0),dim=1)
beta=pad(gamma,(1,0))[:,:-1]
beta[:,0]=beta_0[:,0]
beta=(gamma-beta)/(2*sigma)
beta=beta-pad(beta,(1,0))[:,:-1]
beta[:,-1]=gamma[:,-1]-beta[:,:-1].sum(dim=1)
ksi=pad(torch.cumsum(sigma,dim=1),(1,0))[:,:-1]
indices=ksi<pred_cdf
pred=(beta_N*pad(pred_cdf,(1,0),value=1)).sum(dim=1)
pred=pred+((pred_cdf-ksi).pow(2)*beta*indices).sum(dim=1)
samples[j, :, t] = pred
                #the value predicted at step t is fed back as a covariate for steps t+1,...,t+lag
for lag in range(self.params.lag):
if t<self.params.pred_steps-lag-1:
x[self.params.pred_start+t+1,:,0]=pred
sample_mu = torch.mean(samples, dim=0) # mean or median ?
sample_std = samples.std(dim=0)
return samples, sample_mu, sample_std
def loss_fn(func_param, labels: Variable):
beta_n1,beta_1,sigma,gamma=func_param
beta_0=gamma[:,:1]-2*beta_1*sigma[:,:1]
beta_N=torch.cat((beta_n1,beta_0),dim=1)
beta=pad(gamma,(1,0))[:,:-1]
beta[:,0]=beta_0[:,0]
beta=(gamma-beta)/(2*sigma)
beta=beta-pad(beta,(1,0))[:,:-1]
beta[:,-1]=gamma[:,-1]-beta[:,:-1].sum(dim=1)
#calculate the maximum for each segment of the spline
ksi=torch.cumsum(sigma,dim=1)
df1=ksi.expand(sigma.shape[1],sigma.shape[0],sigma.shape[1]).T.clone()
df2=pad(ksi.T.unsqueeze(2),(1,0),'constant',value=1)
ksi=pad(ksi,(1,0))[:,:-1]
knots=df1-ksi
knots[knots<0]=0
knots=(df2*beta_N).sum(dim=2)+(knots.pow(2)*beta).sum(dim=2)
knots=pad(knots.T,(1,0))[:,:-1]#F(ksi_1~K)=0~max
diff=labels.view(-1,1)-knots
alpha_l=diff>0
alpha_A=torch.sum(alpha_l*beta,dim=1)
alpha_B=beta_N[:,1]-2*torch.sum(alpha_l*beta*ksi,dim=1)
alpha_C=beta_N[:,0]-labels+torch.sum(alpha_l*beta*ksi*ksi,dim=1)
#since A may be zero, roots can be from different methods.
not_zero=(alpha_A!=0)
alpha=torch.zeros_like(alpha_A)
#since there may be numerical calculation error,#0
idx=(alpha_B**2-4*alpha_A*alpha_C)<0#0
diff=diff.abs()
index=diff==(diff.min(dim=1)[0].view(-1,1))
index[~idx,:]=False
#index=diff.abs()<1e-4#0,1e-4 is a threshold
#idx=index.sum(dim=1)>0#0
alpha[idx]=ksi[index]#0
alpha[~not_zero]=-alpha_C[~not_zero]/alpha_B[~not_zero]
not_zero=~(~not_zero | idx)#0
delta=alpha_B[not_zero].pow(2)-4*alpha_A[not_zero]*alpha_C[not_zero]
alpha[not_zero]=(-alpha_B[not_zero]+torch.sqrt(delta))/(2*alpha_A[not_zero])
crps_1=labels*(2*alpha-1)
#lam2=lambda n:2*beta_N[:,n-1]*(1/n/(n+1)-alpha.pow(n)/n)
#crps_2=reduce(lambda a,b:a+b,[lam2(n) for n in range(1,2+1)])
crps_2=beta_N[:,0]*(1-2*alpha)+beta_N[:,1]*(1/3-alpha.pow(2))
crps_3=torch.sum(2*beta/((2+1)*(2+2))*(1-ksi).pow(2+2),dim=1)
crps_4=torch.sum(alpha_l*2*beta/(2+1)*(torch.unsqueeze(alpha,1)-ksi).pow(2+1),dim=1)
crps=crps_1+crps_2+crps_3-crps_4
crps = torch.mean(crps)
return crps
| en | 0.737448 | # -*- coding: utf-8 -*- Created on Wed Oct 21 19:52:22 2020
#Plan A
@author: 18096 Defines the neural network, loss function and metrics #from functools import reduce We define a recurrent network that predicts the future values
of a time-dependent variable based on past inputs and covariates. # initialize LSTM forget gate bias to be 1 as recommanded by # http://proceedings.mlr.press/v37/jozefowicz15.pdf #Plan A: #beta_01:[beta0,beta1] # softmax to make sure Σu equals to 1 # softplus to make sure gamma is positive # softplus to make sure beta0 is positive # use h from all three layers to calculate mu and sigma #Plan A: #Plan A: generate samples by sampling from #pred_cdf is a uniform ditribution #predict value at t-1 is as a covars for t,t+1,...,t+lag # mean or median ? #calculate the maximum for each segment of the spline #F(ksi_1~K)=0~max #since A may be zero, roots can be from different methods. #since there may be numerical calculation error,#0 #0 #index=diff.abs()<1e-4#0,1e-4 is a threshold #idx=index.sum(dim=1)>0#0 #0 #0 #lam2=lambda n:2*beta_N[:,n-1]*(1/n/(n+1)-alpha.pow(n)/n) #crps_2=reduce(lambda a,b:a+b,[lam2(n) for n in range(1,2+1)]) | 2.378718 | 2 |
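A minimal, self-contained sketch of the forget-gate bias trick used in the file above; the layer sizes here are invented for illustration. PyTorch packs each LSTM bias as [b_ii | b_if | b_ig | b_io], so the second quarter is the forget gate:

import torch.nn as nn

lstm = nn.LSTM(input_size=8, hidden_size=16, num_layers=2)
for names in lstm._all_weights:                      # one list of parameter names per layer
    for name in filter(lambda n: "bias" in n, names):
        bias = getattr(lstm, name)
        n = bias.size(0)
        bias.data[n // 4:n // 2].fill_(1.0)          # forget-gate slice set to 1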
tests/repositories/helpers/methods/test_reinstall_if_needed.py | traibnn/integration | 1 | 10458 | <filename>tests/repositories/helpers/methods/test_reinstall_if_needed.py
import pytest
@pytest.mark.asyncio
async def test_reinstall_if_needed(repository):
repository.content.path.local = "/non/existing/dir"
repository.data.installed = True
await repository.async_reinstall_if_needed()
| <filename>tests/repositories/helpers/methods/test_reinstall_if_needed.py
import pytest
@pytest.mark.asyncio
async def test_reinstall_if_needed(repository):
repository.content.path.local = "/non/existing/dir"
repository.data.installed = True
await repository.async_reinstall_if_needed()
| none | 1 | 1.814353 | 2 |
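The test above leans on the pytest-asyncio plugin and a repository fixture defined elsewhere in that project; the general pattern, with a stand-in coroutine instead of the real fixture, looks roughly like this (assumes pytest-asyncio is installed):

import asyncio
import pytest

@pytest.mark.asyncio
async def test_async_helper():
    # pytest-asyncio runs this coroutine inside an event loop
    await asyncio.sleep(0)
    assert True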
|
workflow_parser/datasource/log_engine.py | cyx1231st/workflow_parser | 0 | 10459 | <filename>workflow_parser/datasource/log_engine.py<gh_stars>0
# Copyright (c) 2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
from abc import ABCMeta
from abc import abstractmethod
from collections import defaultdict
import os
from os import path
import sys
from .. import reserved_vars as rv
from ..service_registry import Component
from ..service_registry import ServiceRegistry
from . import Line
from . import Source
from .exc import LogError
class DriverPlugin(object):
__metaclass__ = ABCMeta
def __init__(self,
f_filter_logfile,
f_filter_logline,
extensions):
self._extensions = extensions
self.f_filter_logfile = f_filter_logfile
self.f_filter_logline = f_filter_logline
def _purge_dict_empty_values(self, var_dict):
        for k in list(var_dict.keys()):  # copy keys so entries can be popped while iterating
if var_dict[k] in {None, ""}:
var_dict.pop(k)
def do_filter_logfile(self, f_dir, f_name):
assert isinstance(f_dir, str)
assert isinstance(f_name, str)
assert f_name in f_dir
# skip non-file
if not path.isfile(f_dir):
return False, None
# check file extension
ext_match = False
for ext in self._extensions:
if f_name.endswith("." + ext):
ext_match = True
if not ext_match:
return False, None
try:
var_dict = {}
ret = self.f_filter_logfile(f_dir, f_name, var_dict)
assert isinstance(ret, bool)
if ret:
# NOTE
# print("(LogDriver) loaded: %s" % f_dir)
assert all(isinstance(k, str) for k in var_dict.keys())
self._purge_dict_empty_values(var_dict)
return True, var_dict
else:
# skip
return False, None
except Exception as e:
raise LogError(
"(LogDriver) `f_filter_logfile` error when f_name=%s"
% f_name, e)
def do_filter_logline(self, line, lino, where):
assert isinstance(line, str)
assert isinstance(lino, int)
assert isinstance(where, str)
try:
var_dict = {}
ret = self.f_filter_logline(line, var_dict)
assert all(isinstance(k, str) for k in var_dict.keys())
self._purge_dict_empty_values(var_dict)
assert isinstance(ret, bool)
return ret, var_dict
except Exception as e:
raise LogError("(LogDriver) `f_filter_logline` error at %s@%d %s"
% (where, lino, line), e)
class FileDatasource(object):
def __init__(self, name, f_dir, vs, sr, plugin):
assert isinstance(sr, ServiceRegistry)
assert isinstance(plugin, DriverPlugin)
self.sr = sr
self.plugin = plugin
self.name = name
self.f_dir = f_dir
self.total_lines = 0
self.source = Source(name, f_dir, vs)
self.requests = set()
@property
def total_lineobjs(self):
return self.source.len_lineobjs
# def _buffer_lines(self, lines):
# buffer_lines = Heap(key=lambda a: a.seconds)
# prv_line = [None]
# def _flush_line(flush=None):
# while buffer_lines:
# if flush and buffer_lines.distance < flush:
# break
# line = buffer_lines.pop()
# if prv_line[0] is not None:
# prv_line[0].nxt_logline = line
# line.prv_logline = prv_line[0]
# assert prv_line[0] <= line
# yield line
# prv_line[0] = line
# for line in lines:
# assert isinstance(line, LogLine)
# buffer_lines.push(line)
# for line in _flush_line(1):
# yield line
# for line in _flush_line():
# yield line
def yield_lineobjs(self, targets_byname):
with open(self.f_dir, 'r') as reader:
for line in reader:
self.total_lines += 1
lino = self.total_lines
if_proceed, vs = self.plugin.do_filter_logline(
line, lino, self.name)
if if_proceed:
# convert component
component = vs.get(rv.COMPONENT)
if component is not None:
c_obj = self.sr.f_to_component(component)
if not c_obj:
raise LogError(
"Error in %s@%d %s: unrecognized component %s"
% (self.name, lino, line, component))
else:
vs[rv.COMPONENT] = c_obj
# collect requests
request = vs.get(rv.REQUEST)
if request is not None:
self.requests.add(request)
lineobj = self.source.append_line(
lino, line, vs, targets_byname)
yield lineobj
@classmethod
def create_byfolder(cls, log_folder, sr, plugin):
assert isinstance(log_folder, str)
assert isinstance(plugin, DriverPlugin)
datasources = []
# current_path = path.dirname(os.path.realpath(__file__))
current_path = os.getcwd()
log_folder = path.join(current_path, log_folder)
for f_name in os.listdir(log_folder):
f_dir = path.join(log_folder, f_name)
if_proceed, vs = plugin.do_filter_logfile(f_dir, f_name)
if if_proceed:
# convert component
component = vs.get(rv.COMPONENT)
if component is not None:
                    c_obj = sr.f_to_component(component)  # classmethod: use the sr argument, not self
if not c_obj:
raise LogError(
"Error in %s: unrecognized component %s"
% (f_name, component))
else:
vs[rv.COMPONENT] = c_obj
ds = cls(f_name.rsplit(".", 1)[0], f_dir, vs, sr, plugin)
datasources.append(ds)
return log_folder, datasources
# step1: load related log files
def loadsources(log_folder, sr, plugin):
print("Load data sources...")
log_folder, datasources = FileDatasource.create_byfolder(
log_folder, sr, plugin)
print("---------------")
#### summary ####
print("%d datasources from %s" % (len(datasources), log_folder))
print()
return datasources
# step2: read sources
def readsources(datasources, sr, report):
targets_byname = {}
targets_byhost = defaultdict(list)
targets_bycomponent = defaultdict(list)
threads = set()
print("Read data sources...")
for datasource in datasources:
for line_obj in datasource.yield_lineobjs(targets_byname):
pass
for targetobj in targets_byname.values():
if not isinstance(targetobj.target, str) or not targetobj.target:
raise LogError("%s has invalid target: %s" % (
                targetobj, targetobj.target))
if not isinstance(targetobj.host, str) or not targetobj.host:
raise LogError("%s has invalid host: %s" % (
                targetobj, targetobj.host))
if not isinstance(targetobj.component, Component):
raise LogError("%s has invalid component: %s" % (
                targetobj, targetobj.component))
targets_byhost[targetobj.host].append(targetobj)
targets_bycomponent[targetobj.component].append(targetobj)
threads.update(targetobj.thread_objs)
print("---------------")
#### summary ####
total_targets = len(targets_byname)
total_hosts = len(targets_byhost)
total_components = len(targets_bycomponent)
print("%d targets, %d hosts" %
(total_targets,
total_hosts))
total_lines = sum(datasource.total_lines for datasource in datasources)
total_lineobjs = sum(datasource.total_lineobjs
for datasource in datasources)
if not total_lines:
print("0 valid lines")
else:
print("%.2f%% valid: %d lines -> %d lineobjs"
% (float(total_lineobjs)/total_lines*100,
total_lines,
total_lineobjs))
for comp in sr.sr_components:
targets = targets_bycomponent.get(comp, [])
if not targets:
raise LogError("ERROR! miss component %s" % comp)
else:
component_threads = sum(len(target.thread_objs) for target in targets)
component_lines = sum(target.len_lineobjs for target in targets)
min_target_threads, max_target_threads = sys.maxsize, 0
min_target_lineobjs, max_target_lineobjs = sys.maxsize, 0
hosts_ = set()
for target_obj in targets:
hosts_.add(target_obj.host)
min_target_threads = min(min_target_threads, len(target_obj.thread_objs))
max_target_threads = max(max_target_threads, len(target_obj.thread_objs))
min_target_lineobjs = min(min_target_lineobjs,
target_obj.len_lineobjs)
max_target_lineobjs = max(max_target_lineobjs,
target_obj.len_lineobjs)
print(" %s: %d hosts, %d targets, %d threads, %d lines"
% (comp, len(hosts_), len(targets),
component_threads,
component_lines))
print(" per-target: %.3f[%d, %d] threads, %.3f[%d, %d] loglines"
% (component_threads/float(len(targets)),
min_target_threads,
max_target_threads,
component_lines/float(len(targets)),
min_target_lineobjs,
max_target_lineobjs))
print()
#### report #####
requests = set()
for ds in datasources:
requests.update(ds.requests)
report.step("read", line=total_lineobjs,
component=total_components,
host=total_hosts,
target=total_targets,
thread=len(threads),
request=len(requests))
return targets_byname
def proceed(logfolder, sr, plugin, report):
datasources = loadsources(logfolder, sr, plugin)
targetobjs = readsources(datasources, sr, report)
return targetobjs
| <filename>workflow_parser/datasource/log_engine.py<gh_stars>0
# Copyright (c) 2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
from abc import ABCMeta
from abc import abstractmethod
from collections import defaultdict
import os
from os import path
import sys
from .. import reserved_vars as rv
from ..service_registry import Component
from ..service_registry import ServiceRegistry
from . import Line
from . import Source
from .exc import LogError
class DriverPlugin(object):
__metaclass__ = ABCMeta
def __init__(self,
f_filter_logfile,
f_filter_logline,
extensions):
self._extensions = extensions
self.f_filter_logfile = f_filter_logfile
self.f_filter_logline = f_filter_logline
def _purge_dict_empty_values(self, var_dict):
        for k in list(var_dict.keys()):  # copy keys so entries can be popped while iterating
if var_dict[k] in {None, ""}:
var_dict.pop(k)
def do_filter_logfile(self, f_dir, f_name):
assert isinstance(f_dir, str)
assert isinstance(f_name, str)
assert f_name in f_dir
# skip non-file
if not path.isfile(f_dir):
return False, None
# check file extension
ext_match = False
for ext in self._extensions:
if f_name.endswith("." + ext):
ext_match = True
if not ext_match:
return False, None
try:
var_dict = {}
ret = self.f_filter_logfile(f_dir, f_name, var_dict)
assert isinstance(ret, bool)
if ret:
# NOTE
# print("(LogDriver) loaded: %s" % f_dir)
assert all(isinstance(k, str) for k in var_dict.keys())
self._purge_dict_empty_values(var_dict)
return True, var_dict
else:
# skip
return False, None
except Exception as e:
raise LogError(
"(LogDriver) `f_filter_logfile` error when f_name=%s"
% f_name, e)
def do_filter_logline(self, line, lino, where):
assert isinstance(line, str)
assert isinstance(lino, int)
assert isinstance(where, str)
try:
var_dict = {}
ret = self.f_filter_logline(line, var_dict)
assert all(isinstance(k, str) for k in var_dict.keys())
self._purge_dict_empty_values(var_dict)
assert isinstance(ret, bool)
return ret, var_dict
except Exception as e:
raise LogError("(LogDriver) `f_filter_logline` error at %s@%d %s"
% (where, lino, line), e)
class FileDatasource(object):
def __init__(self, name, f_dir, vs, sr, plugin):
assert isinstance(sr, ServiceRegistry)
assert isinstance(plugin, DriverPlugin)
self.sr = sr
self.plugin = plugin
self.name = name
self.f_dir = f_dir
self.total_lines = 0
self.source = Source(name, f_dir, vs)
self.requests = set()
@property
def total_lineobjs(self):
return self.source.len_lineobjs
# def _buffer_lines(self, lines):
# buffer_lines = Heap(key=lambda a: a.seconds)
# prv_line = [None]
# def _flush_line(flush=None):
# while buffer_lines:
# if flush and buffer_lines.distance < flush:
# break
# line = buffer_lines.pop()
# if prv_line[0] is not None:
# prv_line[0].nxt_logline = line
# line.prv_logline = prv_line[0]
# assert prv_line[0] <= line
# yield line
# prv_line[0] = line
# for line in lines:
# assert isinstance(line, LogLine)
# buffer_lines.push(line)
# for line in _flush_line(1):
# yield line
# for line in _flush_line():
# yield line
def yield_lineobjs(self, targets_byname):
with open(self.f_dir, 'r') as reader:
for line in reader:
self.total_lines += 1
lino = self.total_lines
if_proceed, vs = self.plugin.do_filter_logline(
line, lino, self.name)
if if_proceed:
# convert component
component = vs.get(rv.COMPONENT)
if component is not None:
c_obj = self.sr.f_to_component(component)
if not c_obj:
raise LogError(
"Error in %s@%d %s: unrecognized component %s"
% (self.name, lino, line, component))
else:
vs[rv.COMPONENT] = c_obj
# collect requests
request = vs.get(rv.REQUEST)
if request is not None:
self.requests.add(request)
lineobj = self.source.append_line(
lino, line, vs, targets_byname)
yield lineobj
@classmethod
def create_byfolder(cls, log_folder, sr, plugin):
assert isinstance(log_folder, str)
assert isinstance(plugin, DriverPlugin)
datasources = []
# current_path = path.dirname(os.path.realpath(__file__))
current_path = os.getcwd()
log_folder = path.join(current_path, log_folder)
for f_name in os.listdir(log_folder):
f_dir = path.join(log_folder, f_name)
if_proceed, vs = plugin.do_filter_logfile(f_dir, f_name)
if if_proceed:
# convert component
component = vs.get(rv.COMPONENT)
if component is not None:
                    c_obj = sr.f_to_component(component)  # classmethod: use the sr argument, not self
if not c_obj:
raise LogError(
"Error in %s: unrecognized component %s"
% (f_name, component))
else:
vs[rv.COMPONENT] = c_obj
ds = cls(f_name.rsplit(".", 1)[0], f_dir, vs, sr, plugin)
datasources.append(ds)
return log_folder, datasources
# step1: load related log files
def loadsources(log_folder, sr, plugin):
print("Load data sources...")
log_folder, datasources = FileDatasource.create_byfolder(
log_folder, sr, plugin)
print("---------------")
#### summary ####
print("%d datasources from %s" % (len(datasources), log_folder))
print()
return datasources
# step2: read sources
def readsources(datasources, sr, report):
targets_byname = {}
targets_byhost = defaultdict(list)
targets_bycomponent = defaultdict(list)
threads = set()
print("Read data sources...")
for datasource in datasources:
for line_obj in datasource.yield_lineobjs(targets_byname):
pass
for targetobj in targets_byname.values():
if not isinstance(targetobj.target, str) or not targetobj.target:
raise LogError("%s has invalid target: %s" % (
                targetobj, targetobj.target))
if not isinstance(targetobj.host, str) or not targetobj.host:
raise LogError("%s has invalid host: %s" % (
                targetobj, targetobj.host))
if not isinstance(targetobj.component, Component):
raise LogError("%s has invalid component: %s" % (
                targetobj, targetobj.component))
targets_byhost[targetobj.host].append(targetobj)
targets_bycomponent[targetobj.component].append(targetobj)
threads.update(targetobj.thread_objs)
print("---------------")
#### summary ####
total_targets = len(targets_byname)
total_hosts = len(targets_byhost)
total_components = len(targets_bycomponent)
print("%d targets, %d hosts" %
(total_targets,
total_hosts))
total_lines = sum(datasource.total_lines for datasource in datasources)
total_lineobjs = sum(datasource.total_lineobjs
for datasource in datasources)
if not total_lines:
print("0 valid lines")
else:
print("%.2f%% valid: %d lines -> %d lineobjs"
% (float(total_lineobjs)/total_lines*100,
total_lines,
total_lineobjs))
for comp in sr.sr_components:
targets = targets_bycomponent.get(comp, [])
if not targets:
raise LogError("ERROR! miss component %s" % comp)
else:
component_threads = sum(len(target.thread_objs) for target in targets)
component_lines = sum(target.len_lineobjs for target in targets)
min_target_threads, max_target_threads = sys.maxsize, 0
min_target_lineobjs, max_target_lineobjs = sys.maxsize, 0
hosts_ = set()
for target_obj in targets:
hosts_.add(target_obj.host)
min_target_threads = min(min_target_threads, len(target_obj.thread_objs))
max_target_threads = max(max_target_threads, len(target_obj.thread_objs))
min_target_lineobjs = min(min_target_lineobjs,
target_obj.len_lineobjs)
max_target_lineobjs = max(max_target_lineobjs,
target_obj.len_lineobjs)
print(" %s: %d hosts, %d targets, %d threads, %d lines"
% (comp, len(hosts_), len(targets),
component_threads,
component_lines))
print(" per-target: %.3f[%d, %d] threads, %.3f[%d, %d] loglines"
% (component_threads/float(len(targets)),
min_target_threads,
max_target_threads,
component_lines/float(len(targets)),
min_target_lineobjs,
max_target_lineobjs))
print()
#### report #####
requests = set()
for ds in datasources:
requests.update(ds.requests)
report.step("read", line=total_lineobjs,
component=total_components,
host=total_hosts,
target=total_targets,
thread=len(threads),
request=len(requests))
return targets_byname
def proceed(logfolder, sr, plugin, report):
datasources = loadsources(logfolder, sr, plugin)
targetobjs = readsources(datasources, sr, report)
return targetobjs
| en | 0.719765 | # Copyright (c) 2017 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # skip non-file # check file extension # NOTE # print("(LogDriver) loaded: %s" % f_dir) # skip # def _buffer_lines(self, lines): # buffer_lines = Heap(key=lambda a: a.seconds) # prv_line = [None] # def _flush_line(flush=None): # while buffer_lines: # if flush and buffer_lines.distance < flush: # break # line = buffer_lines.pop() # if prv_line[0] is not None: # prv_line[0].nxt_logline = line # line.prv_logline = prv_line[0] # assert prv_line[0] <= line # yield line # prv_line[0] = line # for line in lines: # assert isinstance(line, LogLine) # buffer_lines.push(line) # for line in _flush_line(1): # yield line # for line in _flush_line(): # yield line # convert component # collect requests # current_path = path.dirname(os.path.realpath(__file__)) # convert component # step1: load related log files #### summary #### # step2: read sources #### summary #### #### report ##### | 1.957511 | 2 |
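log_engine.py above treats its two filter callbacks as the extension point: f_filter_logfile(f_dir, f_name, var_dict) and f_filter_logline(line, var_dict) must return a bool and may fill var_dict with string-keyed values. A hypothetical pair in that shape; the key names and parsing rules are invented here, and the real reserved key names come from reserved_vars, which is not shown:

def my_filter_logfile(f_dir, f_name, var_dict):
    # keep every file that already passed the extension check, record a target name
    var_dict["target"] = f_name.rsplit(".", 1)[0]
    return True

def my_filter_logline(line, var_dict):
    parts = line.split(" ", 2)
    if len(parts) < 3:
        return False                 # malformed line, skipped
    var_dict["time"] = parts[0]
    var_dict["thread"] = parts[1]
    return True

# plugin = DriverPlugin(my_filter_logfile, my_filter_logline, extensions=["log"])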
IPython/lib/tests/test_irunner_pylab_magic.py | dchichkov/ipython | 0 | 10460 | """Test suite for pylab_import_all magic
Modified from the irunner module but using regex.
"""
# Global to make tests extra verbose and help debugging
VERBOSE = True
# stdlib imports
import StringIO
import sys
import unittest
import re
# IPython imports
from IPython.lib import irunner
from IPython.testing import decorators
def pylab_not_importable():
"""Test if importing pylab fails with RuntimeError (true when having no display)"""
try:
import pylab
return False
except RuntimeError:
return True
# Testing code begins
class RunnerTestCase(unittest.TestCase):
def setUp(self):
self.out = StringIO.StringIO()
#self.out = sys.stdout
def _test_runner(self,runner,source,output):
"""Test that a given runner's input/output match."""
runner.run_source(source)
out = self.out.getvalue()
#out = ''
# this output contains nasty \r\n lineends, and the initial ipython
# banner. clean it up for comparison, removing lines of whitespace
output_l = [l for l in output.splitlines() if l and not l.isspace()]
out_l = [l for l in out.splitlines() if l and not l.isspace()]
mismatch = 0
if len(output_l) != len(out_l):
message = ("Mismatch in number of lines\n\n"
"Expected:\n"
"~~~~~~~~~\n"
"%s\n\n"
"Got:\n"
"~~~~~~~~~\n"
"%s"
) % ("\n".join(output_l), "\n".join(out_l))
self.fail(message)
for n in range(len(output_l)):
# Do a line-by-line comparison
ol1 = output_l[n].strip()
ol2 = out_l[n].strip()
if not re.match(ol1,ol2):
mismatch += 1
if VERBOSE:
print '<<< line %s does not match:' % n
print repr(ol1)
print repr(ol2)
print '>>>'
self.assert_(mismatch==0,'Number of mismatched lines: %s' %
mismatch)
@decorators.skipif_not_matplotlib
@decorators.skipif(pylab_not_importable, "Likely a run without X.")
def test_pylab_import_all_enabled(self):
"Verify that plot is available when pylab_import_all = True"
source = """
from IPython.config.application import Application
app = Application.instance()
app.pylab_import_all = True
pylab
ip=get_ipython()
'plot' in ip.user_ns
"""
output = """
In \[1\]: from IPython\.config\.application import Application
In \[2\]: app = Application\.instance\(\)
In \[3\]: app\.pylab_import_all = True
In \[4\]: pylab
^Welcome to pylab, a matplotlib-based Python environment
For more information, type 'help\(pylab\)'\.
In \[5\]: ip=get_ipython\(\)
In \[6\]: \'plot\' in ip\.user_ns
Out\[6\]: True
"""
runner = irunner.IPythonRunner(out=self.out)
self._test_runner(runner,source,output)
@decorators.skipif_not_matplotlib
@decorators.skipif(pylab_not_importable, "Likely a run without X.")
def test_pylab_import_all_disabled(self):
"Verify that plot is not available when pylab_import_all = False"
source = """
from IPython.config.application import Application
app = Application.instance()
app.pylab_import_all = False
pylab
ip=get_ipython()
'plot' in ip.user_ns
"""
output = """
In \[1\]: from IPython\.config\.application import Application
In \[2\]: app = Application\.instance\(\)
In \[3\]: app\.pylab_import_all = False
In \[4\]: pylab
^Welcome to pylab, a matplotlib-based Python environment
For more information, type 'help\(pylab\)'\.
In \[5\]: ip=get_ipython\(\)
In \[6\]: \'plot\' in ip\.user_ns
Out\[6\]: False
"""
runner = irunner.IPythonRunner(out=self.out)
self._test_runner(runner,source,output)
| """Test suite for pylab_import_all magic
Modified from the irunner module but using regex.
"""
# Global to make tests extra verbose and help debugging
VERBOSE = True
# stdlib imports
import StringIO
import sys
import unittest
import re
# IPython imports
from IPython.lib import irunner
from IPython.testing import decorators
def pylab_not_importable():
"""Test if importing pylab fails with RuntimeError (true when having no display)"""
try:
import pylab
return False
except RuntimeError:
return True
# Testing code begins
class RunnerTestCase(unittest.TestCase):
def setUp(self):
self.out = StringIO.StringIO()
#self.out = sys.stdout
def _test_runner(self,runner,source,output):
"""Test that a given runner's input/output match."""
runner.run_source(source)
out = self.out.getvalue()
#out = ''
# this output contains nasty \r\n lineends, and the initial ipython
# banner. clean it up for comparison, removing lines of whitespace
output_l = [l for l in output.splitlines() if l and not l.isspace()]
out_l = [l for l in out.splitlines() if l and not l.isspace()]
mismatch = 0
if len(output_l) != len(out_l):
message = ("Mismatch in number of lines\n\n"
"Expected:\n"
"~~~~~~~~~\n"
"%s\n\n"
"Got:\n"
"~~~~~~~~~\n"
"%s"
) % ("\n".join(output_l), "\n".join(out_l))
self.fail(message)
for n in range(len(output_l)):
# Do a line-by-line comparison
ol1 = output_l[n].strip()
ol2 = out_l[n].strip()
if not re.match(ol1,ol2):
mismatch += 1
if VERBOSE:
print '<<< line %s does not match:' % n
print repr(ol1)
print repr(ol2)
print '>>>'
self.assert_(mismatch==0,'Number of mismatched lines: %s' %
mismatch)
@decorators.skipif_not_matplotlib
@decorators.skipif(pylab_not_importable, "Likely a run without X.")
def test_pylab_import_all_enabled(self):
"Verify that plot is available when pylab_import_all = True"
source = """
from IPython.config.application import Application
app = Application.instance()
app.pylab_import_all = True
pylab
ip=get_ipython()
'plot' in ip.user_ns
"""
output = """
In \[1\]: from IPython\.config\.application import Application
In \[2\]: app = Application\.instance\(\)
In \[3\]: app\.pylab_import_all = True
In \[4\]: pylab
^Welcome to pylab, a matplotlib-based Python environment
For more information, type 'help\(pylab\)'\.
In \[5\]: ip=get_ipython\(\)
In \[6\]: \'plot\' in ip\.user_ns
Out\[6\]: True
"""
runner = irunner.IPythonRunner(out=self.out)
self._test_runner(runner,source,output)
@decorators.skipif_not_matplotlib
@decorators.skipif(pylab_not_importable, "Likely a run without X.")
def test_pylab_import_all_disabled(self):
"Verify that plot is not available when pylab_import_all = False"
source = """
from IPython.config.application import Application
app = Application.instance()
app.pylab_import_all = False
pylab
ip=get_ipython()
'plot' in ip.user_ns
"""
output = """
In \[1\]: from IPython\.config\.application import Application
In \[2\]: app = Application\.instance\(\)
In \[3\]: app\.pylab_import_all = False
In \[4\]: pylab
^Welcome to pylab, a matplotlib-based Python environment
For more information, type 'help\(pylab\)'\.
In \[5\]: ip=get_ipython\(\)
In \[6\]: \'plot\' in ip\.user_ns
Out\[6\]: False
"""
runner = irunner.IPythonRunner(out=self.out)
self._test_runner(runner,source,output)
| en | 0.581641 | Test suite for pylab_import_all magic Modified from the irunner module but using regex. # Global to make tests extra verbose and help debugging # stdlib imports # IPython imports Test if importing pylab fails with RuntimeError (true when having no display) # Testing code begins #self.out = sys.stdout Test that a given runner's input/output match. #out = '' # this output contains nasty \r\n lineends, and the initial ipython # banner. clean it up for comparison, removing lines of whitespace # Do a line-by-line comparison from IPython.config.application import Application app = Application.instance() app.pylab_import_all = True pylab ip=get_ipython() 'plot' in ip.user_ns In \[1\]: from IPython\.config\.application import Application In \[2\]: app = Application\.instance\(\) In \[3\]: app\.pylab_import_all = True In \[4\]: pylab ^Welcome to pylab, a matplotlib-based Python environment For more information, type 'help\(pylab\)'\. In \[5\]: ip=get_ipython\(\) In \[6\]: \'plot\' in ip\.user_ns Out\[6\]: True from IPython.config.application import Application app = Application.instance() app.pylab_import_all = False pylab ip=get_ipython() 'plot' in ip.user_ns In \[1\]: from IPython\.config\.application import Application In \[2\]: app = Application\.instance\(\) In \[3\]: app\.pylab_import_all = False In \[4\]: pylab ^Welcome to pylab, a matplotlib-based Python environment For more information, type 'help\(pylab\)'\. In \[5\]: ip=get_ipython\(\) In \[6\]: \'plot\' in ip\.user_ns Out\[6\]: False | 2.559547 | 3 |
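_test_runner above strips blank lines and then compares expected and actual output line by line with re.match, treating each expected line as a regex anchored at the start. The same check in isolation, with made-up lines (plain Python 3, unlike the Python 2 era test above):

import re

expected = [r"In \[1\]: x = 1", r"Out\[2\]: 2"]
got = ["In [1]: x = 1", "Out[2]: 2"]
mismatches = sum(1 for e, g in zip(expected, got) if not re.match(e, g.strip()))
assert mismatches == 0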
checkpoint.py | GooLee0123/MBRNN | 1 | 10461 | import logging
import os
import shutil
import time
import torch
model_state = 'model_state.pt'
trainer_state = 'trainer_state.pt'
class Checkpoint():
def __init__(self, step, epoch, model, optim, path=None, opt=None):
self.step = step
self.epoch = epoch
self.model = model
self.optim = optim
self._path = path
self.opt = opt
self.logger = logging.getLogger(__name__)
@property
def path(self):
if self._path is None:
raise LookupError("The checkpoint has not been saved.")
return self._path
@classmethod
def load(cls, model, optim=None, opt=None):
logger = logging.getLogger(__name__)
all_times = sorted(os.listdir(opt.ckpt_fd), reverse=True)
fchckpt = os.path.join(opt.ckpt_fd, all_times[0])
logger.info("load checkpoint from %s" % fchckpt)
resume_model = torch.load(os.path.join(fchckpt, model_state),
map_location=opt.device)
resume_checkpoint = torch.load(os.path.join(fchckpt, trainer_state),
map_location=opt.device)
model.load_state_dict(resume_model)
if optim is not None:
optim.load_state_dict(resume_checkpoint['optimizer'])
return Checkpoint(step=resume_checkpoint['step'],
epoch=resume_checkpoint['epoch'],
model=model,
optim=optim,
path=opt.ckpt_fd)
def save(self):
date_time = time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime())
path = os.path.join(self.opt.ckpt_fd, date_time)
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
torch.save(
{'epoch': self.epoch,
'step': self.step,
'optimizer': self.optim.state_dict()},
os.path.join(path, trainer_state))
torch.save(
self.model.state_dict(), os.path.join(path, model_state))
log_msg = "Validation loss being smaller than previous "
log_msg += "minimum, checkpoint is saved at %s" % path
self.logger.info(log_msg)
return path
| import logging
import os
import shutil
import time
import torch
model_state = 'model_state.pt'
trainer_state = 'trainer_state.pt'
class Checkpoint():
def __init__(self, step, epoch, model, optim, path=None, opt=None):
self.step = step
self.epoch = epoch
self.model = model
self.optim = optim
self._path = path
self.opt = opt
self.logger = logging.getLogger(__name__)
@property
def path(self):
if self._path is None:
raise LookupError("The checkpoint has not been saved.")
return self._path
@classmethod
def load(cls, model, optim=None, opt=None):
logger = logging.getLogger(__name__)
all_times = sorted(os.listdir(opt.ckpt_fd), reverse=True)
fchckpt = os.path.join(opt.ckpt_fd, all_times[0])
logger.info("load checkpoint from %s" % fchckpt)
resume_model = torch.load(os.path.join(fchckpt, model_state),
map_location=opt.device)
resume_checkpoint = torch.load(os.path.join(fchckpt, trainer_state),
map_location=opt.device)
model.load_state_dict(resume_model)
if optim is not None:
optim.load_state_dict(resume_checkpoint['optimizer'])
return Checkpoint(step=resume_checkpoint['step'],
epoch=resume_checkpoint['epoch'],
model=model,
optim=optim,
path=opt.ckpt_fd)
def save(self):
date_time = time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime())
path = os.path.join(self.opt.ckpt_fd, date_time)
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
torch.save(
{'epoch': self.epoch,
'step': self.step,
'optimizer': self.optim.state_dict()},
os.path.join(path, trainer_state))
torch.save(
self.model.state_dict(), os.path.join(path, model_state))
log_msg = "Validation loss being smaller than previous "
log_msg += "minimum, checkpoint is saved at %s" % path
self.logger.info(log_msg)
return path
| none | 1 | 2.37965 | 2 |
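A sketch of how the Checkpoint class above could be driven; the Opt container and the tiny model are invented for illustration, only the attributes the class actually reads (ckpt_fd and device) matter, and the import path is an assumption:

import torch
import torch.nn as nn
from checkpoint import Checkpoint   # hypothetical import path for the file above

class Opt:
    ckpt_fd = "./checkpoints"       # made-up checkpoint folder
    device = torch.device("cpu")

model = nn.Linear(4, 2)
optim = torch.optim.SGD(model.parameters(), lr=0.01)
ckpt = Checkpoint(step=0, epoch=1, model=model, optim=optim, opt=Opt())
saved_path = ckpt.save()            # writes model_state.pt and trainer_state.pt
resumed = Checkpoint.load(model, optim=optim, opt=Opt())   # loads the newest timestamped folder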
|
test/eval_mines_color.py | alalagong/LEDNet | 3 | 10462 | import numpy as np
import torch
import os
import cv2
import importlib
from dataset import *
from PIL import Image
from argparse import ArgumentParser
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision.transforms import Compose, CenterCrop, Normalize, Resize
from torchvision.transforms import ToTensor, ToPILImage
from dataset import cityscapes
from lednet import Net
from transform import Relabel, ToLabel, Colorize
import visdom
NUM_CHANNELS = 3
NUM_CLASSES = 20
#* ******************* Test a single image ****************************
image_transform = ToPILImage()
input_transform_cityscapes = Compose([
Resize((512, 1024), Image.BILINEAR),
ToTensor(),
# Normalize([.485, .456, .406], [.229, .224, .225]),
])
def main(args):
modelpath = args.loadDir + args.loadModel
weightspath = args.loadDir + args.loadWeights
print("Loading model: " + modelpath)
print("Loading weights: " + weightspath)
model = Net(NUM_CLASSES)
model = torch.nn.DataParallel(model)
if (not args.cpu):
model = model.cuda()
# model.load_state_dict(torch.load(args.state))
# model.load_state_dict(torch.load(weightspath)) #not working if missing key
    def load_my_state_dict(model, state_dict):  # custom function to load model when not all dict elements match
own_state = model.state_dict()
for name, param in state_dict.items():
if name not in own_state:
continue
own_state[name].copy_(param)
return model
model = load_my_state_dict(model, torch.load(weightspath))
print("Model and weights LOADED successfully")
model.eval()
if (not os.path.exists(args.datadir)):
print("Error: datadir could not be loaded")
# loader = DataLoader(
# cityscapes('/home/liqi/PycharmProjects/LEDNet/4.png', input_transform_cityscapes, target_transform_cityscapes, subset=args.subset),
# num_workers=args.num_workers, batch_size=1 ,shuffle=False)
input_transform_cityscapes = Compose([
Resize((512, 1024), Image.BILINEAR),
ToTensor(),
# Normalize([.485, .456, .406], [.229, .224, .225]),
])
name ="4.png"
with open(image_path_city('/home/gongyiqun/images', name), 'rb') as f:
images = load_image(f).convert('RGB')
images = input_transform_cityscapes(images)
# For visualizer:
# must launch in other window "python3.6 -m visdom.server -port 8097"
# and access localhost:8097 to see it
if (args.visualize):
vis = visdom.Visdom()
if (not args.cpu):
images = images.cuda()
# labels = labels.cuda()
a=torch.unsqueeze(images,0)
inputs = Variable(a)
# targets = Variable(labels)
with torch.no_grad():
outputs = model(inputs)
label = outputs[0].max(0)[1].byte().cpu().data
# label_cityscapes = cityscapes_trainIds2labelIds(label.unsqueeze(0))
label_color = Colorize()(label.unsqueeze(0))
filenameSave = "./save_color/"+"Others/"+name
os.makedirs(os.path.dirname(filenameSave), exist_ok=True)
# image_transform(label.byte()).save(filenameSave)
label_save = ToPILImage()(label_color)
label_save = label_save.resize((1241, 376), Image.BILINEAR)
# label_save = cv2.resize(label_save, (376, 1224),interpolation=cv2.INTER_AREA)
label_save.save(filenameSave)
if (args.visualize):
vis.image(label_color.numpy())
# print(step, filenameSave)
# for step, (images, labels, filename, filenameGt) in enumerate(loader):
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--state')
parser.add_argument('--loadDir', default="../save/logs(KITTI)/")
parser.add_argument('--loadWeights', default="model_best.pth")
parser.add_argument('--loadModel', default="lednet.py")
parser.add_argument('--subset', default="val") # can be val, test, train, demoSequence
parser.add_argument('--datadir', default="")
parser.add_argument('--num-workers', type=int, default=4)
parser.add_argument('--batch-size', type=int, default=1)
parser.add_argument('--cpu', action='store_true')
parser.add_argument('--visualize', action='store_true')
main(parser.parse_args())
| import numpy as np
import torch
import os
import cv2
import importlib
from dataset import *
from PIL import Image
from argparse import ArgumentParser
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision.transforms import Compose, CenterCrop, Normalize, Resize
from torchvision.transforms import ToTensor, ToPILImage
from dataset import cityscapes
from lednet import Net
from transform import Relabel, ToLabel, Colorize
import visdom
NUM_CHANNELS = 3
NUM_CLASSES = 20
#* ******************* Test a single image ****************************
image_transform = ToPILImage()
input_transform_cityscapes = Compose([
Resize((512, 1024), Image.BILINEAR),
ToTensor(),
# Normalize([.485, .456, .406], [.229, .224, .225]),
])
def main(args):
modelpath = args.loadDir + args.loadModel
weightspath = args.loadDir + args.loadWeights
print("Loading model: " + modelpath)
print("Loading weights: " + weightspath)
model = Net(NUM_CLASSES)
model = torch.nn.DataParallel(model)
if (not args.cpu):
model = model.cuda()
# model.load_state_dict(torch.load(args.state))
# model.load_state_dict(torch.load(weightspath)) #not working if missing key
    def load_my_state_dict(model, state_dict):  # custom function to load model when not all dict elements match
own_state = model.state_dict()
for name, param in state_dict.items():
if name not in own_state:
continue
own_state[name].copy_(param)
return model
model = load_my_state_dict(model, torch.load(weightspath))
print("Model and weights LOADED successfully")
model.eval()
if (not os.path.exists(args.datadir)):
print("Error: datadir could not be loaded")
# loader = DataLoader(
# cityscapes('/home/liqi/PycharmProjects/LEDNet/4.png', input_transform_cityscapes, target_transform_cityscapes, subset=args.subset),
# num_workers=args.num_workers, batch_size=1 ,shuffle=False)
input_transform_cityscapes = Compose([
Resize((512, 1024), Image.BILINEAR),
ToTensor(),
# Normalize([.485, .456, .406], [.229, .224, .225]),
])
name ="4.png"
with open(image_path_city('/home/gongyiqun/images', name), 'rb') as f:
images = load_image(f).convert('RGB')
images = input_transform_cityscapes(images)
# For visualizer:
# must launch in other window "python3.6 -m visdom.server -port 8097"
# and access localhost:8097 to see it
if (args.visualize):
vis = visdom.Visdom()
if (not args.cpu):
images = images.cuda()
# labels = labels.cuda()
a=torch.unsqueeze(images,0)
inputs = Variable(a)
# targets = Variable(labels)
with torch.no_grad():
outputs = model(inputs)
label = outputs[0].max(0)[1].byte().cpu().data
# label_cityscapes = cityscapes_trainIds2labelIds(label.unsqueeze(0))
label_color = Colorize()(label.unsqueeze(0))
filenameSave = "./save_color/"+"Others/"+name
os.makedirs(os.path.dirname(filenameSave), exist_ok=True)
# image_transform(label.byte()).save(filenameSave)
label_save = ToPILImage()(label_color)
label_save = label_save.resize((1241, 376), Image.BILINEAR)
# label_save = cv2.resize(label_save, (376, 1224),interpolation=cv2.INTER_AREA)
label_save.save(filenameSave)
if (args.visualize):
vis.image(label_color.numpy())
# print(step, filenameSave)
# for step, (images, labels, filename, filenameGt) in enumerate(loader):
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--state')
parser.add_argument('--loadDir', default="../save/logs(KITTI)/")
parser.add_argument('--loadWeights', default="model_best.pth")
parser.add_argument('--loadModel', default="lednet.py")
parser.add_argument('--subset', default="val") # can be val, test, train, demoSequence
parser.add_argument('--datadir', default="")
parser.add_argument('--num-workers', type=int, default=4)
parser.add_argument('--batch-size', type=int, default=1)
parser.add_argument('--cpu', action='store_true')
parser.add_argument('--visualize', action='store_true')
main(parser.parse_args())
| en | 0.436171 | #* *******************测试单张图片**************************** # Normalize([.485, .456, .406], [.229, .224, .225]), # model.load_state_dict(torch.load(args.state)) # model.load_state_dict(torch.load(weightspath)) #not working if missing key # custom function to load model when not all dict elements # loader = DataLoader( # cityscapes('/home/liqi/PycharmProjects/LEDNet/4.png', input_transform_cityscapes, target_transform_cityscapes, subset=args.subset), # num_workers=args.num_workers, batch_size=1 ,shuffle=False) # Normalize([.485, .456, .406], [.229, .224, .225]), # For visualizer: # must launch in other window "python3.6 -m visdom.server -port 8097" # and access localhost:8097 to see it # labels = labels.cuda() # targets = Variable(labels) # label_cityscapes = cityscapes_trainIds2labelIds(label.unsqueeze(0)) # image_transform(label.byte()).save(filenameSave) # label_save = cv2.resize(label_save, (376, 1224),interpolation=cv2.INTER_AREA) # print(step, filenameSave) # for step, (images, labels, filename, filenameGt) in enumerate(loader): # can be val, test, train, demoSequence | 2.357113 | 2 |
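load_my_state_dict above copies only the entries whose names exist in the current model, the usual trick for loading weights into a slightly different architecture. A standalone version of the same idea; the extra shape check is an addition, not taken from the script above:

import torch
import torch.nn as nn

def load_matching_state(model, state_dict):
    own_state = model.state_dict()
    for name, param in state_dict.items():
        # skip unknown names and mismatched shapes instead of raising
        if name in own_state and own_state[name].shape == param.shape:
            own_state[name].copy_(param)
    return model

net = nn.Sequential(nn.Linear(3, 3))
load_matching_state(net, {"0.weight": torch.eye(3), "missing.bias": torch.zeros(3)})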
tests/test_resource_linkage.py | firesock/pydantic-jsonapi | 0 | 10463 | import pytest
from pytest import raises
from pydantic_jsonapi.resource_linkage import ResourceLinkage
from pydantic import BaseModel, ValidationError
class ThingWithLinkageData(BaseModel):
data: ResourceLinkage
class TestResourceLinks:
@pytest.mark.parametrize(
'linkage, message',
[
(
None,
'null is valid for empty to-one relationships',
),
(
[],
'empty list valid for empty to-many relationships.',
),
(
{'id': 'abc123', 'type': 'item', 'meta': None},
'single resource identifier valid for non-empty to-one relationships.',
),
(
[
{'id': 'abc123', 'type': 'item', 'meta': None},
{'id': 'def456', 'type': 'item', 'meta': None},
],
'array of resource identifiers valid for non-empty to-many relationships.',
),
],
)
def test_valid_possibilities(self, linkage, message):
structure_to_validate = {
'data': linkage
}
validated = ThingWithLinkageData(**structure_to_validate)
assert validated.dict() == structure_to_validate, message
def test_invalid_resource_identifier(self):
structure_to_validate = {
'data': {}
}
with raises(ValidationError) as e:
ThingWithLinkageData(**structure_to_validate)
assert e.value.errors() == [
{'loc': ('data', 'id'), 'msg': 'field required', 'type': 'value_error.missing'},
{'loc': ('data', 'type'), 'msg': 'field required', 'type': 'value_error.missing'},
{'loc': ('data',), 'msg': 'value is not a valid list', 'type': 'type_error.list'},
]
def test_invalid_resource_identifier_array(self):
structure_to_validate = {
'data': [
{}
],
}
with raises(ValidationError) as e:
ThingWithLinkageData(**structure_to_validate)
assert e.value.errors() == [
{'loc': ('data',), 'msg': 'value is not a valid dict', 'type': 'type_error.dict'},
{'loc': ('data', 0, 'id'), 'msg': 'field required', 'type': 'value_error.missing'},
{'loc': ('data', 0, 'type'), 'msg': 'field required', 'type': 'value_error.missing'},
]
| import pytest
from pytest import raises
from pydantic_jsonapi.resource_linkage import ResourceLinkage
from pydantic import BaseModel, ValidationError
class ThingWithLinkageData(BaseModel):
data: ResourceLinkage
class TestResourceLinks:
@pytest.mark.parametrize(
'linkage, message',
[
(
None,
'null is valid for empty to-one relationships',
),
(
[],
'empty list valid for empty to-many relationships.',
),
(
{'id': 'abc123', 'type': 'item', 'meta': None},
'single resource identifier valid for non-empty to-one relationships.',
),
(
[
{'id': 'abc123', 'type': 'item', 'meta': None},
{'id': 'def456', 'type': 'item', 'meta': None},
],
'array of resource identifiers valid for non-empty to-many relationships.',
),
],
)
def test_valid_possibilities(self, linkage, message):
structure_to_validate = {
'data': linkage
}
validated = ThingWithLinkageData(**structure_to_validate)
assert validated.dict() == structure_to_validate, message
def test_invalid_resource_identifier(self):
structure_to_validate = {
'data': {}
}
with raises(ValidationError) as e:
ThingWithLinkageData(**structure_to_validate)
assert e.value.errors() == [
{'loc': ('data', 'id'), 'msg': 'field required', 'type': 'value_error.missing'},
{'loc': ('data', 'type'), 'msg': 'field required', 'type': 'value_error.missing'},
{'loc': ('data',), 'msg': 'value is not a valid list', 'type': 'type_error.list'},
]
def test_invalid_resource_identifier_array(self):
structure_to_validate = {
'data': [
{}
],
}
with raises(ValidationError) as e:
ThingWithLinkageData(**structure_to_validate)
assert e.value.errors() == [
{'loc': ('data',), 'msg': 'value is not a valid dict', 'type': 'type_error.dict'},
{'loc': ('data', 0, 'id'), 'msg': 'field required', 'type': 'value_error.missing'},
{'loc': ('data', 0, 'type'), 'msg': 'field required', 'type': 'value_error.missing'},
]
| none | 1 | 2.512387 | 3 |
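The tests above only pin down how ResourceLinkage behaves: null, a single resource identifier, or a list of identifiers. One plausible way to model that shape in plain pydantic, a guess for illustration rather than the library's actual definition:

from typing import List, Optional, Union
from pydantic import BaseModel

class ResourceIdentifier(BaseModel):
    id: str
    type: str
    meta: Optional[dict] = None

class Relationship(BaseModel):
    data: Union[None, ResourceIdentifier, List[ResourceIdentifier]]

Relationship(data=None)                               # empty to-one
Relationship(data=[])                                 # empty to-many
Relationship(data={"id": "abc123", "type": "item"})   # non-empty to-one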
|
src/tensorflow/keras_cnn.py | del680202/MachineLearning-memo | 4 | 10464 | import numpy as np
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD
from keras.utils import np_utils
import keras.callbacks
import keras.backend.tensorflow_backend as KTF
import tensorflow as tf
batch_size = 128
nb_classes = 10
nb_epoch = 20
nb_data = 28*28
log_filepath = '/tmp/keras_log'
# load data
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# reshape
X_train = X_train.reshape(X_train.shape[0], X_train.shape[1]*X_train.shape[2])
X_test = X_test.reshape(X_test.shape[0], X_test.shape[1]*X_test.shape[2])
# rescale
X_train = X_train.astype(np.float32)
X_train /= 255
X_test = X_test.astype(np.float32)
X_test /= 255
# convert class vectors to binary class matrices (one hot vectors)
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
old_session = KTF.get_session()
with tf.Graph().as_default():
session = tf.Session('')
KTF.set_session(session)
KTF.set_learning_phase(1)
# build model
model = Sequential()
model.add(Dense(512, input_shape=(nb_data,), init='normal',name='dense1'))
model.add(Activation('relu', name='relu1'))
model.add(Dropout(0.2, name='dropout1'))
model.add(Dense(512, init='normal', name='dense2'))
model.add(Activation('relu', name='relu2'))
model.add(Dropout(0.2, name='dropout2'))
model.add(Dense(10, init='normal', name='dense3'))
model.add(Activation('softmax', name='softmax1'))
model.summary()
model.compile(loss='categorical_crossentropy', optimizer=SGD(lr=0.001), metrics=['accuracy'])
tb_cb = keras.callbacks.TensorBoard(log_dir=log_filepath, histogram_freq=1)
cbks = [tb_cb]
history = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch = nb_epoch, verbose=1, callbacks=cbks)
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
    print('Test accuracy:', score[1])
KTF.set_session(old_session)
| import numpy as np
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD
from keras.utils import np_utils
import keras.callbacks
import keras.backend.tensorflow_backend as KTF
import tensorflow as tf
batch_size = 128
nb_classes = 10
nb_epoch = 20
nb_data = 28*28
log_filepath = '/tmp/keras_log'
# load data
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# reshape
X_train = X_train.reshape(X_train.shape[0], X_train.shape[1]*X_train.shape[2])
X_test = X_test.reshape(X_test.shape[0], X_test.shape[1]*X_test.shape[2])
# rescale
X_train = X_train.astype(np.float32)
X_train /= 255
X_test = X_test.astype(np.float32)
X_test /= 255
# convert class vectors to binary class matrices (one hot vectors)
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
old_session = KTF.get_session()
with tf.Graph().as_default():
session = tf.Session('')
KTF.set_session(session)
KTF.set_learning_phase(1)
# build model
model = Sequential()
model.add(Dense(512, input_shape=(nb_data,), init='normal',name='dense1'))
model.add(Activation('relu', name='relu1'))
model.add(Dropout(0.2, name='dropout1'))
model.add(Dense(512, init='normal', name='dense2'))
model.add(Activation('relu', name='relu2'))
model.add(Dropout(0.2, name='dropout2'))
model.add(Dense(10, init='normal', name='dense3'))
model.add(Activation('softmax', name='softmax1'))
model.summary()
model.compile(loss='categorical_crossentropy', optimizer=SGD(lr=0.001), metrics=['accuracy'])
tb_cb = keras.callbacks.TensorBoard(log_dir=log_filepath, histogram_freq=1)
cbks = [tb_cb]
history = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch = nb_epoch, verbose=1, callbacks=cbks)
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
    print('Test accuracy:', score[1])
KTF.set_session(old_session)
| en | 0.760369 | # load data # reshape # rescale # convert class vectors to binary class matrices (one hot vectors) # build model | 2.822208 | 3 |
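np_utils.to_categorical in the script above just builds one-hot rows; a dependency-free equivalent for reference:

import numpy as np

def to_one_hot(labels, nb_classes):
    out = np.zeros((len(labels), nb_classes), dtype=np.float32)
    out[np.arange(len(labels)), labels] = 1.0
    return out

to_one_hot(np.array([3, 0, 9]), 10)   # each row has a single 1 at the label index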
tests/blas/nodes/ger_test.py | xiacijie/dace | 1 | 10465 | <reponame>xiacijie/dace
#!/usr/bin/env python3
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
from dace.transformation.dataflow.streaming_memory import StreamingMemory
from dace.transformation.interstate.sdfg_nesting import InlineSDFG
from dace.transformation.interstate.fpga_transform_sdfg import FPGATransformSDFG
import numpy as np
import argparse
import scipy
import dace
from dace.memlet import Memlet
import dace.libraries.blas as blas
from dace.libraries.standard.memory import aligned_ndarray
def pure_graph(implementation, dtype, veclen):
m = dace.symbol("m")
n = dace.symbol("n")
vtype = dace.vector(dtype, veclen)
sdfg = dace.SDFG("ger_test")
state = sdfg.add_state("ger")
sdfg.add_symbol("alpha", dtype)
sdfg.add_array("x", shape=[m], dtype=dtype)
sdfg.add_array("y", shape=[n / veclen], dtype=vtype)
sdfg.add_array("A", shape=[m, n / veclen], dtype=vtype)
sdfg.add_array("res", shape=[m, n / veclen], dtype=vtype)
x = state.add_read("x")
y = state.add_read("y")
A = state.add_read("A")
res = state.add_write("res")
ger_node = blas.Ger(name="ger")
ger_node.implementation = implementation
state.add_memlet_path(x, ger_node, dst_conn="_x", memlet=Memlet("x[0:m]"))
state.add_memlet_path(y,
ger_node,
dst_conn="_y",
memlet=Memlet(f"y[0:n/{veclen}]"))
state.add_memlet_path(A,
ger_node,
dst_conn="_A",
memlet=Memlet(f"A[0:m, 0:n/{veclen}]"))
state.add_memlet_path(ger_node,
res,
src_conn="_res",
memlet=Memlet(f"res[0:m, 0:n/{veclen}]"))
return ger_node, state, sdfg
def fpga_graph(dtype, veclen, tile_size_x, tile_size_y):
ger_node, state, sdfg = pure_graph("FPGA", dtype, veclen)
ger_node.expand(sdfg, state, tile_size_x=tile_size_x, tile_size_y=tile_size_y)
sdfg.apply_transformations_repeated([FPGATransformSDFG, InlineSDFG])
sdfg.expand_library_nodes()
sdfg.apply_transformations_repeated(
[InlineSDFG, StreamingMemory], [{}, {
"storage": dace.StorageType.FPGA_Local
}])
return sdfg
def run_test(ger, target):
x = np.ndarray(m, dtype=np.float32)
y = np.ndarray(n, dtype=np.float32)
A = np.ndarray((m, n), dtype=np.float32)
res = A.copy()
ref = res.copy()
x[:] = np.random.rand(m).astype(np.float32)
y[:] = np.random.rand(n).astype(np.float32)
A[:] = np.random.rand(m, n).astype(np.float32)
ger(alpha=alpha, x=x, y=y, A=A, res=res, m=m, n=n)
ref = scipy.linalg.blas.sger(alpha=alpha, x=x, y=y, a=A)
diff = np.linalg.norm(np.subtract(res, ref))
if diff >= args.eps * n * m:
raise RuntimeError(
"Unexpected result returned from ger rank 1 operation: "
"got:\n{}\nexpected:\n{} on {}".format(A, ref, target))
else:
print("Ok")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("N", type=int, nargs="?", default=256)
parser.add_argument("M", type=int, nargs="?", default=512)
parser.add_argument("tile_size_x", type=int, nargs="?", default=16)
parser.add_argument("tile_size_y", type=int, nargs="?", default=32)
parser.add_argument("alpha", type=np.float32, nargs="?", default=1.0)
parser.add_argument("--target", dest="target", default="pure")
parser.add_argument("--eps", type=float, default=1e-6)
parser.add_argument("--veclen", type=int, default=8)
args = parser.parse_args()
n = args.N
m = args.M
tile_size_x = args.tile_size_x
tile_size_y = args.tile_size_y
alpha = args.alpha
veclen = args.veclen
if args.target == "pure":
ger_node, state, sdfg = pure_graph("pure", dace.float32, veclen)
ger_node.expand(sdfg, state)
sdfg.apply_transformations_repeated([InlineSDFG])
elif args.target == "fpga":
sdfg = fpga_graph(dace.float32, veclen, tile_size_x, tile_size_y)
else:
print("Unsupported target")
exit(-1)
x = aligned_ndarray(np.random.rand(m).astype(np.float32), alignment=4*veclen)
y = aligned_ndarray(np.random.rand(n).astype(np.float32), alignment=4*veclen)
A = aligned_ndarray(np.random.rand(m, n).astype(np.float32), alignment=4*veclen)
res = aligned_ndarray(np.empty(A.shape, dtype=A.dtype), alignment=4*veclen)
ref = aligned_ndarray(np.empty(A.shape, dtype=A.dtype), alignment=4*veclen)
res[:] = A[:]
ref[:] = A[:]
sdfg(x=x, y=y, A=A, res=res, m=dace.int32(m), n=dace.int32(n), alpha=alpha)
ref = scipy.linalg.blas.sger(alpha=alpha, x=x, y=y, a=ref)
diff = np.linalg.norm(res - ref)
if diff >= args.eps * n * m:
raise RuntimeError(f"Validation failed: {diff}")
else:
print("Validation successful.")
| #!/usr/bin/env python3
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
from dace.transformation.dataflow.streaming_memory import StreamingMemory
from dace.transformation.interstate.sdfg_nesting import InlineSDFG
from dace.transformation.interstate.fpga_transform_sdfg import FPGATransformSDFG
import numpy as np
import argparse
import scipy
import dace
from dace.memlet import Memlet
import dace.libraries.blas as blas
from dace.libraries.standard.memory import aligned_ndarray
def pure_graph(implementation, dtype, veclen):
m = dace.symbol("m")
n = dace.symbol("n")
vtype = dace.vector(dtype, veclen)
sdfg = dace.SDFG("ger_test")
state = sdfg.add_state("ger")
sdfg.add_symbol("alpha", dtype)
sdfg.add_array("x", shape=[m], dtype=dtype)
sdfg.add_array("y", shape=[n / veclen], dtype=vtype)
sdfg.add_array("A", shape=[m, n / veclen], dtype=vtype)
sdfg.add_array("res", shape=[m, n / veclen], dtype=vtype)
x = state.add_read("x")
y = state.add_read("y")
A = state.add_read("A")
res = state.add_write("res")
ger_node = blas.Ger(name="ger")
ger_node.implementation = implementation
state.add_memlet_path(x, ger_node, dst_conn="_x", memlet=Memlet("x[0:m]"))
state.add_memlet_path(y,
ger_node,
dst_conn="_y",
memlet=Memlet(f"y[0:n/{veclen}]"))
state.add_memlet_path(A,
ger_node,
dst_conn="_A",
memlet=Memlet(f"A[0:m, 0:n/{veclen}]"))
state.add_memlet_path(ger_node,
res,
src_conn="_res",
memlet=Memlet(f"res[0:m, 0:n/{veclen}]"))
return ger_node, state, sdfg
def fpga_graph(dtype, veclen, tile_size_x, tile_size_y):
ger_node, state, sdfg = pure_graph("FPGA", dtype, veclen)
ger_node.expand(sdfg, state, tile_size_x=tile_size_x, tile_size_y=tile_size_y)
sdfg.apply_transformations_repeated([FPGATransformSDFG, InlineSDFG])
sdfg.expand_library_nodes()
sdfg.apply_transformations_repeated(
[InlineSDFG, StreamingMemory], [{}, {
"storage": dace.StorageType.FPGA_Local
}])
return sdfg
def run_test(ger, target):
x = np.ndarray(m, dtype=np.float32)
y = np.ndarray(n, dtype=np.float32)
A = np.ndarray((m, n), dtype=np.float32)
res = A.copy()
ref = res.copy()
x[:] = np.random.rand(m).astype(np.float32)
y[:] = np.random.rand(n).astype(np.float32)
A[:] = np.random.rand(m, n).astype(np.float32)
ger(alpha=alpha, x=x, y=y, A=A, res=res, m=m, n=n)
ref = scipy.linalg.blas.sger(alpha=alpha, x=x, y=y, a=A)
diff = np.linalg.norm(np.subtract(res, ref))
if diff >= args.eps * n * m:
raise RuntimeError(
"Unexpected result returned from ger rank 1 operation: "
"got:\n{}\nexpected:\n{} on {}".format(A, ref, target))
else:
print("Ok")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("N", type=int, nargs="?", default=256)
parser.add_argument("M", type=int, nargs="?", default=512)
parser.add_argument("tile_size_x", type=int, nargs="?", default=16)
parser.add_argument("tile_size_y", type=int, nargs="?", default=32)
parser.add_argument("alpha", type=np.float32, nargs="?", default=1.0)
parser.add_argument("--target", dest="target", default="pure")
parser.add_argument("--eps", type=float, default=1e-6)
parser.add_argument("--veclen", type=int, default=8)
args = parser.parse_args()
n = args.N
m = args.M
tile_size_x = args.tile_size_x
tile_size_y = args.tile_size_y
alpha = args.alpha
veclen = args.veclen
if args.target == "pure":
ger_node, state, sdfg = pure_graph("pure", dace.float32, veclen)
ger_node.expand(sdfg, state)
sdfg.apply_transformations_repeated([InlineSDFG])
elif args.target == "fpga":
sdfg = fpga_graph(dace.float32, veclen, tile_size_x, tile_size_y)
else:
print("Unsupported target")
exit(-1)
x = aligned_ndarray(np.random.rand(m).astype(np.float32), alignment=4*veclen)
y = aligned_ndarray(np.random.rand(n).astype(np.float32), alignment=4*veclen)
A = aligned_ndarray(np.random.rand(m, n).astype(np.float32), alignment=4*veclen)
res = aligned_ndarray(np.empty(A.shape, dtype=A.dtype), alignment=4*veclen)
ref = aligned_ndarray(np.empty(A.shape, dtype=A.dtype), alignment=4*veclen)
res[:] = A[:]
ref[:] = A[:]
sdfg(x=x, y=y, A=A, res=res, m=dace.int32(m), n=dace.int32(n), alpha=alpha)
ref = scipy.linalg.blas.sger(alpha=alpha, x=x, y=y, a=ref)
diff = np.linalg.norm(res - ref)
if diff >= args.eps * n * m:
raise RuntimeError(f"Validation failed: {diff}")
else:
print("Validation successful.") | en | 0.474033 | #!/usr/bin/env python3 # Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved. | 2.053641 | 2 |
spp.py | ninfueng/torch-cifar | 0 | 10466 | <reponame>ninfueng/torch-cifar
import math
from typing import List, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
@torch.jit.script
def spatial_pyramid_pool(
input: Tensor, bins: Union[int, List[int]], mode: str = "max"
) -> Tensor:
"""Spatial Pyramid Pooling: https://arxiv.org/pdf/1406.4729.pdf
Args:
input (Tensor): an input tensor expected from the convolutional layer.
        bins (List[int]): a list of integers giving the target pooling grid sizes.
        mode (str): how to reduce each spatial bin ("max", or "mean"/"average"/"avg").
    Returns:
        outputs (Tensor): a flattened tensor of shape (batch, channels * (bins[0]**2 +
            bins[1]**2 + ...)); e.g. the 13x13 demo below pools to 512 * (1 + 4 + 9) features.
"""
assert mode in ["max", "mean", "average", "avg"]
b, _, h, w = input.shape
bins = [bins] if isinstance(bins, int) else bins
outputs = []
for bin_ in bins:
h_kernel = math.ceil(h / bin_)
w_kernel = math.ceil(w / bin_)
h_stride = math.floor(h / bin_)
w_stride = math.floor(w / bin_)
if mode == "max":
output = F.max_pool2d(
input, kernel_size=(h_kernel, w_kernel), stride=(h_stride, w_stride)
)
else:
output = F.avg_pool2d(
input, kernel_size=(h_kernel, w_kernel), stride=(h_stride, w_stride)
)
output = output.view(b, -1)
outputs.append(output)
outputs = torch.cat(outputs, dim=-1)
return outputs
class SpaitalPyramidPool(nn.Module):
def __init__(self, bins: Union[int, List[int]], mode: str = "max") -> None:
super().__init__()
self.bins = bins
self.mode = mode
def forward(self, input: Tensor) -> Tensor:
return spatial_pyramid_pool(input, bins=self.bins, mode=self.mode)
if __name__ == "__main__":
input = torch.zeros(1, 512, 13, 13)
output = spatial_pyramid_pool(input, [1, 2, 3], "max")
print(output.shape)
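    # Additional sketch (added; not in the original file): the nn.Module wrapper defined
    # above works the same way and accepts a different spatial size on every call.
    spp = SpaitalPyramidPool(bins=[1, 2, 3], mode="max")
    pooled = spp(torch.zeros(2, 512, 10, 7))
    # each bin b contributes 512 * b * b features: 512 * (1 + 4 + 9) = 7168
    print(pooled.shape)  # expected: torch.Size([2, 7168])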
| import math
from typing import List, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
@torch.jit.script
def spatial_pyramid_pool(
input: Tensor, bins: Union[int, List[int]], mode: str = "max"
) -> Tensor:
"""Spatial Pyramid Pooling: https://arxiv.org/pdf/1406.4729.pdf
Args:
input (Tensor): an input tensor expected from the convolutional layer.
        bins (List[int]): a list of integers giving the target pooling grid sizes.
        mode (str): how to reduce each spatial bin ("max", or "mean"/"average"/"avg").
    Returns:
        outputs (Tensor): a flattened tensor of shape (batch, channels * (bins[0]**2 +
            bins[1]**2 + ...)); e.g. the 13x13 demo below pools to 512 * (1 + 4 + 9) features.
"""
assert mode in ["max", "mean", "average", "avg"]
b, _, h, w = input.shape
bins = [bins] if isinstance(bins, int) else bins
outputs = []
for bin_ in bins:
h_kernel = math.ceil(h / bin_)
w_kernel = math.ceil(w / bin_)
h_stride = math.floor(h / bin_)
w_stride = math.floor(w / bin_)
if mode == "max":
output = F.max_pool2d(
input, kernel_size=(h_kernel, w_kernel), stride=(h_stride, w_stride)
)
else:
output = F.avg_pool2d(
input, kernel_size=(h_kernel, w_kernel), stride=(h_stride, w_stride)
)
output = output.view(b, -1)
outputs.append(output)
outputs = torch.cat(outputs, dim=-1)
return outputs
class SpaitalPyramidPool(nn.Module):
def __init__(self, bins: Union[int, List[int]], mode: str = "max") -> None:
super().__init__()
self.bins = bins
self.mode = mode
def forward(self, input: Tensor) -> Tensor:
return spatial_pyramid_pool(input, bins=self.bins, mode=self.mode)
if __name__ == "__main__":
input = torch.zeros(1, 512, 13, 13)
output = spatial_pyramid_pool(input, [1, 2, 3], "max")
print(output.shape) | en | 0.589503 | Spatial Pyramid Pooling: https://arxiv.org/pdf/1406.4729.pdf Args: input (Tensor): an input tensor expected from the convolutional layer. bins (List[int]): a list of integer of preferred size of outputs. mode (str): how to reduce the spatial space. Returns: outputs (Tensor): a flatten tensor with size (batch, bins[0] * bins[0] + bins[1] * bins[1] + ...) | 3.068665 | 3 |
src/SparseSC/utils/AzureBatch/azure_batch_client.py | wofein/SparseSC | 0 | 10467 | """
usage requires these additional modules
pip install azure-batch azure-storage-blob jsonschema pyyaml && pip install git+https://github.com/microsoft/SparseSC.git@ad4bf27edb28f517508f6934f21eb65d17fb6543 && scgrad start
usage:
from SparseSC import fit, aggregate_batch_results
from SparseSC.utils.azure_batch_client import BatchConfig, run
_TIMESTAMP = datetime.utcnow().strftime("%Y%m%d%H%M%S")
BATCH_DIR= "path/to/my/batch_config/"
fit(x=x,..., batchDir=BATCH_DIR)
my_config = BatchConfig(
BATCH_ACCOUNT_NAME="MySecret",
BATCH_ACCOUNT_KEY="MySecret",
BATCH_ACCOUNT_URL="MySecret",
STORAGE_ACCOUNT_NAME="MySecret",
STORAGE_ACCOUNT_KEY="MySecret",
POOL_ID="my-compute-pool",
POOL_NODE_COUNT=0,
POOL_LOW_PRIORITY_NODE_COUNT=20,
POOL_VM_SIZE="STANDARD_A1_v2",
DELETE_POOL_WHEN_DONE=False,
JOB_ID="my-job" + _TIMESTAMP,
DELETE_JOB_WHEN_DONE=False,
CONTAINER_NAME="my-blob-container",
BATCH_DIRECTORY=BATCH_DIR,
)
run(my_config)
fitted_model = aggregate_batch_results("path/to/my/batch_config")
"""
# pylint: disable=differing-type-doc, differing-param-doc, missing-param-doc, missing-raises-doc, missing-return-doc
from __future__ import print_function
import datetime
import io
import os
import sys
import time
import pathlib
import importlib
from collections import defaultdict
import azure.storage.blob as azureblob
from azure.storage.blob.models import ContainerPermissions
import azure.batch.batch_service_client as batch
import azure.batch.batch_auth as batch_auth
import azure.batch.models as models
from SparseSC.cli.stt import get_config
from ..print_progress import print_progress
from .BatchConfig import BatchConfig, validate_config
from yaml import load
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
from .constants import (
_STANDARD_OUT_FILE_NAME,
_CONTAINER_OUTPUT_FILE,
_CONTAINER_INPUT_FILE,
_BATCH_CV_FILE_NAME,
)
FOLD_FILE_PATTERN = "fold_{}.yaml"
# pylint: disable=bad-continuation, invalid-name, protected-access, line-too-long, fixme
sys.path.append(".")
sys.path.append("..")
# Update the Batch and Storage account credential strings in config.py with values
# unique to your accounts. These are used when constructing connection strings
# for the Batch and Storage client objects.
def build_output_sas_url(config, _blob_client):
"""
build a sas token for the output container
"""
sas_token = _blob_client.generate_container_shared_access_signature(
config.CONTAINER_NAME,
ContainerPermissions.READ
+ ContainerPermissions.WRITE
+ ContainerPermissions.DELETE
+ ContainerPermissions.LIST,
datetime.datetime.utcnow() + datetime.timedelta(hours=config.STORAGE_ACCESS_DURATION_HRS),
start=datetime.datetime.utcnow(),
)
_sas_url = "https://{}.blob.core.windows.net/{}?{}".format(
config.STORAGE_ACCOUNT_NAME, config.CONTAINER_NAME, sas_token
)
return _sas_url
def print_batch_exception(batch_exception):
"""
Prints the contents of the specified Batch exception.
:param batch_exception:
"""
print("-------------------------------------------")
print("Exception encountered:")
if (
batch_exception.error
and batch_exception.error.message
and batch_exception.error.message.value
):
print(batch_exception.error.message.value)
if batch_exception.error.values:
print()
for mesg in batch_exception.error.values:
print("{}:\t{}".format(mesg.key, mesg.value))
print("-------------------------------------------")
def build_output_file(container_sas_url, fold_number):
"""
Uploads a local file to an Azure Blob storage container.
:rtype: `azure.batch.models.ResourceFile`
:return: A ResourceFile initialized with a SAS URL appropriate for Batch
tasks.
"""
# where to store the outputs
container_dest = models.OutputFileBlobContainerDestination(
container_url=container_sas_url, path=FOLD_FILE_PATTERN.format(fold_number)
)
dest = models.OutputFileDestination(container=container_dest)
# under what conditions should you attempt to extract the outputs?
upload_options = models.OutputFileUploadOptions(
upload_condition=models.OutputFileUploadCondition.task_success
)
# https://docs.microsoft.com/en-us/azure/batch/batch-task-output-files#specify-output-files-for-task-output
return models.OutputFile(
file_pattern=_CONTAINER_OUTPUT_FILE,
destination=dest,
upload_options=upload_options,
)
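# Note (added for clarity): each task writes _CONTAINER_OUTPUT_FILE locally; on task
# success the OutputFile spec above uploads it to the results container under the
# blob name fold_<n>.yaml (FOLD_FILE_PATTERN), which _download_files later retrieves.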
def upload_file_to_container(block_blob_client, container_name, file_path, duration_hours=24):
"""
Uploads a local file to an Azure Blob storage container.
:param block_blob_client: A blob service client.
:type block_blob_client: `azure.storage.blob.BlockBlobService`
:param str container_name: The name of the Azure Blob storage container.
:param str file_path: The local path to the file.
:rtype: `azure.batch.models.ResourceFile`
:return: A ResourceFile initialized with a SAS URL appropriate for Batch
tasks.
"""
blob_name = os.path.basename(file_path)
print("Uploading file {} to container [{}]...".format(file_path, container_name))
block_blob_client.create_blob_from_path(container_name, blob_name, file_path)
sas_token = block_blob_client.generate_blob_shared_access_signature(
container_name,
blob_name,
permission=azureblob.BlobPermissions.READ,
expiry=datetime.datetime.utcnow() + datetime.timedelta(hours=duration_hours),
)
sas_url = block_blob_client.make_blob_url(
container_name, blob_name, sas_token=sas_token
)
return models.ResourceFile(http_url=sas_url, file_path=_CONTAINER_INPUT_FILE)
def create_pool(config, batch_service_client):
"""
Creates a pool of compute nodes with the specified OS settings.
:param batch_service_client: A Batch service client.
:type batch_service_client: `azure.batch.BatchServiceClient`
:param str pool_id: An ID for the new pool.
:param str publisher: Marketplace image publisher
:param str offer: Marketplace image offer
:param str sku: Marketplace image sku
"""
# Create a new pool of Linux compute nodes using an Azure Virtual Machines
# Marketplace image. For more information about creating pools of Linux
# nodes, see:
# https://azure.microsoft.com/documentation/articles/batch-linux-nodes/
image_ref_to_use = models.ImageReference(
publisher="microsoft-azure-batch",
offer="ubuntu-server-container",
sku="16-04-lts",
version="latest",
)
if config.REGISTRY_USERNAME:
registry = batch.models.ContainerRegistry(
user_name=config.REGISTRY_USERNAME,
password=config.REGISTRY_PASSWORD,
registry_server=config.REGISTRY_SERVER,
)
container_conf = batch.models.ContainerConfiguration(
container_image_names=[config.DOCKER_CONTAINER],
container_registries=[registry],
)
else:
container_conf = batch.models.ContainerConfiguration(
container_image_names=[config.DOCKER_CONTAINER]
)
new_pool = batch.models.PoolAddParameter(
id=config.POOL_ID,
virtual_machine_configuration=batch.models.VirtualMachineConfiguration(
image_reference=image_ref_to_use,
container_configuration=container_conf,
node_agent_sku_id="batch.node.ubuntu 16.04",
),
vm_size=config.POOL_VM_SIZE,
target_dedicated_nodes=config.POOL_NODE_COUNT,
target_low_priority_nodes=config.POOL_LOW_PRIORITY_NODE_COUNT,
)
batch_service_client.pool.add(new_pool)
def create_job(batch_service_client, job_id, pool_id):
"""
Creates a job with the specified ID, associated with the specified pool.
:param batch_service_client: A Batch service client.
:type batch_service_client: `azure.batch.BatchServiceClient`
:param str job_id: The ID for the job.
:param str pool_id: The ID for the pool.
"""
print("Creating job [{}]...".format(job_id))
job_description = batch.models.JobAddParameter(
id=job_id, pool_info=batch.models.PoolInformation(pool_id=pool_id)
)
batch_service_client.job.add(job_description)
def add_tasks(
config,
_blob_client,
batch_service_client,
container_sas_url,
job_id,
_input_file,
count,
):
"""
Adds a task for each input file in the collection to the specified job.
:param batch_service_client: A Batch service client.
:type batch_service_client: `azure.batch.BatchServiceClient`
:param str job_id: The ID of the job to which to add the tasks.
:param list input_files: The input files
:param output_container_sas_token: A SAS token granting write access to
the specified Azure Blob storage container.
"""
print("Adding {} tasks to job [{}]...".format(count, job_id))
tasks = list()
for fold_number in range(count):
output_file = build_output_file(container_sas_url, fold_number)
# command_line = '/bin/bash -c \'echo "Hello World" && echo "hello: world" > output.yaml\''
command_line = "/bin/bash -c 'stt {} {} {}'".format(
_CONTAINER_INPUT_FILE, _CONTAINER_OUTPUT_FILE, fold_number
)
task_container_settings = models.TaskContainerSettings(
image_name=config.DOCKER_CONTAINER
)
tasks.append(
batch.models.TaskAddParameter(
id="Task_{}".format(fold_number),
command_line=command_line,
resource_files=[_input_file],
output_files=[output_file],
container_settings=task_container_settings,
)
)
batch_service_client.task.add_collection(job_id, tasks)
def wait_for_tasks_to_complete(batch_service_client, job_id, timeout):
"""
Returns when all tasks in the specified job reach the Completed state.
:param batch_service_client: A Batch service client.
:type batch_service_client: `azure.batch.BatchServiceClient`
:param str job_id: The id of the job whose tasks should be to monitored.
:param timedelta timeout: The duration to wait for task completion. If all
tasks in the specified job do not reach Completed state within this time
period, an exception will be raised.
"""
_start_time = datetime.datetime.now()
timeout_expiration = _start_time + timeout
# print( "Monitoring all tasks for 'Completed' state, timeout in {}...".format(timeout), end="",)
while datetime.datetime.now() < timeout_expiration:
sys.stdout.flush()
tasks = [t for t in batch_service_client.task.list(job_id)]
incomplete_tasks = [
task for task in tasks if task.state != models.TaskState.completed
]
hours, remainder = divmod((datetime.datetime.now() - _start_time).seconds, 3600)
minutes, seconds = divmod(remainder, 60)
print_progress(
len(tasks) - len(incomplete_tasks),
len(tasks),
prefix="Time elapsed {:02}:{:02}:{:02}".format(
int(hours), int(minutes), int(seconds)
),
decimals=1,
bar_length=min(len(tasks), 50),
)
error_codes = [t.execution_info.exit_code for t in tasks if t.execution_info and t.execution_info.exit_code ]
if error_codes:
codes = defaultdict(lambda : 0)
for cd in error_codes:
codes[cd] +=1
# import pdb; pdb.set_trace()
raise RuntimeError( "\nSome tasks have exited with a non-zero exit code including: " + ", ".join([ "{}({})".format(k,v) for k, v in codes.items() ] ))
if not incomplete_tasks:
print()
return True
time.sleep(1)
print()
raise RuntimeError(
"ERROR: Tasks did not reach 'Completed' state within "
"timeout period of " + str(timeout)
)
def print_task_output(batch_service_client, job_id, encoding=None):
"""Prints the stdout.txt file for each task in the job.
:param batch_client: The batch client to use.
:type batch_client: `batchserviceclient.BatchServiceClient`
:param str job_id: The id of the job with task output files to print.
"""
print("Printing task output...")
tasks = batch_service_client.task.list(job_id)
for task in tasks:
node_id = batch_service_client.task.get(job_id, task.id).node_info.node_id
print("Task: {}".format(task.id))
print("Node: {}".format(node_id))
stream = batch_service_client.file.get_from_task(
job_id, task.id, _STANDARD_OUT_FILE_NAME
)
file_text = _read_stream_as_string(stream, encoding)
print("Standard output:")
print(file_text)
def _read_stream_as_string(stream, encoding):
"""Read stream as string
:param stream: input stream generator
:param str encoding: The encoding of the file. The default is utf-8.
:return: The file content.
:rtype: str
"""
output = io.BytesIO()
try:
for data in stream:
output.write(data)
if encoding is None:
encoding = "utf-8"
return output.getvalue().decode(encoding)
finally:
output.close()
raise RuntimeError("could not write data to stream or decode bytes")
def _download_files(config, _blob_client, out_path, count):
pathlib.Path(config.BATCH_DIRECTORY).mkdir(parents=True, exist_ok=True)
blob_names = [b.name for b in _blob_client.list_blobs(config.CONTAINER_NAME)]
for i in range(count):
blob_name = FOLD_FILE_PATTERN.format(i)
if not blob_name in blob_names:
raise RuntimeError("incomplete blob set: missing blob {}".format(blob_name))
out_path = os.path.join(config.BATCH_DIRECTORY, blob_name)
_blob_client.get_blob_to_path(config.CONTAINER_NAME, blob_name, out_path)
def _download_results(config, _blob_client, out_path, count, ptrn=FOLD_FILE_PATTERN):
pathlib.Path(config.BATCH_DIRECTORY).mkdir(parents=True, exist_ok=True)
blob_names = [b.name for b in _blob_client.list_blobs(config.CONTAINER_NAME)]
results = []
for i in range(count):
blob_name = ptrn.format(i)
if not blob_name in blob_names:
raise RuntimeError("incomplete blob set: missing blob {}".format(blob_name))
out_path = os.path.join(config.BATCH_DIRECTORY, blob_name)
        # download to disk, then parse the YAML result for this fold
        _blob_client.get_blob_to_path(config.CONTAINER_NAME, blob_name, out_path)
        with open(out_path, "r") as stream:
            results.append(load(stream, Loader=Loader))
return results
def run(config: BatchConfig, wait=True) -> None:
r"""
:param config: A :class:`BatchConfig` instance with the Azure Batch run parameters
:type config: :class:BatchConfig
:param boolean wait: If true, wait for the batch to complete and then
download the results to file
:raises BatchErrorException: If raised by the Azure Batch Python SDK
"""
# pylint: disable=too-many-locals
# replace any missing values in the configuration with environment variables
config = validate_config(config)
start_time = datetime.datetime.now().replace(microsecond=0)
print(
'Synthetic Controls Run "{}" start time: {}'.format(config.JOB_ID, start_time)
)
print()
_LOCAL_INPUT_FILE = os.path.join(config.BATCH_DIRECTORY, _BATCH_CV_FILE_NAME)
v_pen, w_pen, model_data = get_config(_LOCAL_INPUT_FILE)
n_folds = len(model_data["folds"]) * len(v_pen) * len(w_pen)
# Create the blob client, for use in obtaining references to
# blob storage containers and uploading files to containers.
blob_client = azureblob.BlockBlobService(
account_name=config.STORAGE_ACCOUNT_NAME, account_key=config.STORAGE_ACCOUNT_KEY
)
# Use the blob client to create the containers in Azure Storage if they
# don't yet exist.
blob_client.create_container(config.CONTAINER_NAME, fail_on_exist=False)
CONTAINER_SAS_URL = build_output_sas_url(config, blob_client)
# The collection of data files that are to be processed by the tasks.
input_file_path = os.path.join(sys.path[0], _LOCAL_INPUT_FILE)
# Upload the data files.
input_file = upload_file_to_container(
blob_client, config.CONTAINER_NAME, input_file_path, config.STORAGE_ACCESS_DURATION_HRS
)
# Create a Batch service client. We'll now be interacting with the Batch
# service in addition to Storage
credentials = batch_auth.SharedKeyCredentials(
config.BATCH_ACCOUNT_NAME, config.BATCH_ACCOUNT_KEY
)
batch_client = batch.BatchServiceClient(
credentials, batch_url=config.BATCH_ACCOUNT_URL
)
try:
# Create the pool that will contain the compute nodes that will execute the
# tasks.
try:
create_pool(config, batch_client)
print("Created pool: ", config.POOL_ID)
except models.BatchErrorException:
print("Using pool: ", config.POOL_ID)
# Create the job that will run the tasks.
create_job(batch_client, config.JOB_ID, config.POOL_ID)
# Add the tasks to the job.
add_tasks(
config,
blob_client,
batch_client,
CONTAINER_SAS_URL,
config.JOB_ID,
input_file,
n_folds,
)
if not wait:
return
# Pause execution until tasks reach Completed state.
wait_for_tasks_to_complete(
batch_client, config.JOB_ID, datetime.timedelta(hours=config.STORAGE_ACCESS_DURATION_HRS)
)
_download_files(config, blob_client, config.BATCH_DIRECTORY, n_folds)
except models.BatchErrorException as err:
print_batch_exception(err)
raise err
# Clean up storage resources
# TODO: re-enable this and delete the output container too
# -- print("Deleting container [{}]...".format(input_container_name))
# -- blob_client.delete_container(input_container_name)
# Print out some timing info
end_time = datetime.datetime.now().replace(microsecond=0)
print()
print("Sample end: {}".format(end_time))
print("Elapsed time: {}".format(end_time - start_time))
print()
# Clean up Batch resources (if the user so chooses).
if config.DELETE_POOL_WHEN_DONE:
batch_client.pool.delete(config.POOL_ID)
if config.DELETE_JOB_WHEN_DONE:
batch_client.job.delete(config.JOB_ID)
def load_results(config: BatchConfig) -> None:
r"""
:param config: A :class:`BatchConfig` instance with the Azure Batch run parameters
:type config: :class:BatchConfig
:raises BatchErrorException: If raised by the Azure Batch Python SDK
"""
# pylint: disable=too-many-locals
# replace any missing values in the configuration with environment variables
config = validate_config(config)
start_time = datetime.datetime.now().replace(microsecond=0)
print('Load result for job "{}" start time: {}'.format(config.JOB_ID, start_time))
print()
_LOCAL_INPUT_FILE = os.path.join(config.BATCH_DIRECTORY, _BATCH_CV_FILE_NAME)
v_pen, w_pen, model_data = get_config(_LOCAL_INPUT_FILE)
n_folds = len(model_data["folds"]) * len(v_pen) * len(w_pen)
# Create the blob client, for use in obtaining references to
# blob storage containers and uploading files to containers.
blob_client = azureblob.BlockBlobService(
account_name=config.STORAGE_ACCOUNT_NAME, account_key=config.STORAGE_ACCOUNT_KEY
)
# Create a Batch service client. We'll now be interacting with the Batch
# service in addition to Storage
credentials = batch_auth.SharedKeyCredentials(
config.BATCH_ACCOUNT_NAME, config.BATCH_ACCOUNT_KEY
)
batch_client = batch.BatchServiceClient(
credentials, batch_url=config.BATCH_ACCOUNT_URL
)
try:
# Pause execution until tasks reach Completed state.
wait_for_tasks_to_complete(
batch_client, config.JOB_ID, datetime.timedelta(hours=config.STORAGE_ACCESS_DURATION_HRS)
)
_download_files(config, blob_client, config.BATCH_DIRECTORY, n_folds)
except models.BatchErrorException as err:
print_batch_exception(err)
raise err
# Clean up storage resources
# TODO: re-enable this and delete the output container too
# -- print("Deleting container [{}]...".format(input_container_name))
# -- blob_client.delete_container(input_container_name)
# Print out some timing info
end_time = datetime.datetime.now().replace(microsecond=0)
print()
print("Sample end: {}".format(end_time))
print("Elapsed time: {}".format(end_time - start_time))
print()
# Clean up Batch resources (if the user so chooses).
if config.DELETE_POOL_WHEN_DONE:
batch_client.pool.delete(config.POOL_ID)
if config.DELETE_JOB_WHEN_DONE:
batch_client.job.delete(config.JOB_ID)
if __name__ == "__main__":
# TODO: this is not an ideal API
config_module = importlib.__import__("config")
run(config_module.config)
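    # Note (added; assumption about intent, not stated in the original file): this entry
    # point expects a local config.py on the import path exposing a BatchConfig instance
    # named `config`, roughly along the lines of the example in the module docstring:
    #
    #     from SparseSC.utils.azure_batch_client import BatchConfig
    #     config = BatchConfig(JOB_ID="my-job", POOL_ID="my-compute-pool",
    #                          CONTAINER_NAME="my-blob-container",
    #                          BATCH_DIRECTORY="path/to/my/batch_config/", ...)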
| """
usage requires these additional modules
pip install azure-batch azure-storage-blob jsonschema pyyaml && pip install git+https://github.com/microsoft/SparseSC.git@ad4bf27edb28f517508f6934f21eb65d17fb6543 && scgrad start
usage:
from SparseSC import fit, aggregate_batch_results
from SparseSC.utils.azure_batch_client import BatchConfig, run
_TIMESTAMP = datetime.utcnow().strftime("%Y%m%d%H%M%S")
BATCH_DIR= "path/to/my/batch_config/"
fit(x=x,..., batchDir=BATCH_DIR)
my_config = BatchConfig(
BATCH_ACCOUNT_NAME="MySecret",
BATCH_ACCOUNT_KEY="MySecret",
BATCH_ACCOUNT_URL="MySecret",
STORAGE_ACCOUNT_NAME="MySecret",
STORAGE_ACCOUNT_KEY="MySecret",
POOL_ID="my-compute-pool",
POOL_NODE_COUNT=0,
POOL_LOW_PRIORITY_NODE_COUNT=20,
POOL_VM_SIZE="STANDARD_A1_v2",
DELETE_POOL_WHEN_DONE=False,
JOB_ID="my-job" + _TIMESTAMP,
DELETE_JOB_WHEN_DONE=False,
CONTAINER_NAME="my-blob-container",
BATCH_DIRECTORY=BATCH_DIR,
)
run(my_config)
fitted_model = aggregate_batch_results("path/to/my/batch_config")
"""
# pylint: disable=differing-type-doc, differing-param-doc, missing-param-doc, missing-raises-doc, missing-return-doc
from __future__ import print_function
import datetime
import io
import os
import sys
import time
import pathlib
import importlib
from collections import defaultdict
import azure.storage.blob as azureblob
from azure.storage.blob.models import ContainerPermissions
import azure.batch.batch_service_client as batch
import azure.batch.batch_auth as batch_auth
import azure.batch.models as models
from SparseSC.cli.stt import get_config
from ..print_progress import print_progress
from .BatchConfig import BatchConfig, validate_config
from yaml import load
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
from .constants import (
_STANDARD_OUT_FILE_NAME,
_CONTAINER_OUTPUT_FILE,
_CONTAINER_INPUT_FILE,
_BATCH_CV_FILE_NAME,
)
FOLD_FILE_PATTERN = "fold_{}.yaml"
# pylint: disable=bad-continuation, invalid-name, protected-access, line-too-long, fixme
sys.path.append(".")
sys.path.append("..")
# Update the Batch and Storage account credential strings in config.py with values
# unique to your accounts. These are used when constructing connection strings
# for the Batch and Storage client objects.
def build_output_sas_url(config, _blob_client):
"""
build a sas token for the output container
"""
sas_token = _blob_client.generate_container_shared_access_signature(
config.CONTAINER_NAME,
ContainerPermissions.READ
+ ContainerPermissions.WRITE
+ ContainerPermissions.DELETE
+ ContainerPermissions.LIST,
datetime.datetime.utcnow() + datetime.timedelta(hours=config.STORAGE_ACCESS_DURATION_HRS),
start=datetime.datetime.utcnow(),
)
_sas_url = "https://{}.blob.core.windows.net/{}?{}".format(
config.STORAGE_ACCOUNT_NAME, config.CONTAINER_NAME, sas_token
)
return _sas_url
def print_batch_exception(batch_exception):
"""
Prints the contents of the specified Batch exception.
:param batch_exception:
"""
print("-------------------------------------------")
print("Exception encountered:")
if (
batch_exception.error
and batch_exception.error.message
and batch_exception.error.message.value
):
print(batch_exception.error.message.value)
if batch_exception.error.values:
print()
for mesg in batch_exception.error.values:
print("{}:\t{}".format(mesg.key, mesg.value))
print("-------------------------------------------")
def build_output_file(container_sas_url, fold_number):
"""
Uploads a local file to an Azure Blob storage container.
:rtype: `azure.batch.models.ResourceFile`
:return: A ResourceFile initialized with a SAS URL appropriate for Batch
tasks.
"""
# where to store the outputs
container_dest = models.OutputFileBlobContainerDestination(
container_url=container_sas_url, path=FOLD_FILE_PATTERN.format(fold_number)
)
dest = models.OutputFileDestination(container=container_dest)
# under what conditions should you attempt to extract the outputs?
upload_options = models.OutputFileUploadOptions(
upload_condition=models.OutputFileUploadCondition.task_success
)
# https://docs.microsoft.com/en-us/azure/batch/batch-task-output-files#specify-output-files-for-task-output
return models.OutputFile(
file_pattern=_CONTAINER_OUTPUT_FILE,
destination=dest,
upload_options=upload_options,
)
def upload_file_to_container(block_blob_client, container_name, file_path, duration_hours=24):
"""
Uploads a local file to an Azure Blob storage container.
:param block_blob_client: A blob service client.
:type block_blob_client: `azure.storage.blob.BlockBlobService`
:param str container_name: The name of the Azure Blob storage container.
:param str file_path: The local path to the file.
:rtype: `azure.batch.models.ResourceFile`
:return: A ResourceFile initialized with a SAS URL appropriate for Batch
tasks.
"""
blob_name = os.path.basename(file_path)
print("Uploading file {} to container [{}]...".format(file_path, container_name))
block_blob_client.create_blob_from_path(container_name, blob_name, file_path)
sas_token = block_blob_client.generate_blob_shared_access_signature(
container_name,
blob_name,
permission=azureblob.BlobPermissions.READ,
expiry=datetime.datetime.utcnow() + datetime.timedelta(hours=duration_hours),
)
sas_url = block_blob_client.make_blob_url(
container_name, blob_name, sas_token=sas_token
)
return models.ResourceFile(http_url=sas_url, file_path=_CONTAINER_INPUT_FILE)
def create_pool(config, batch_service_client):
"""
Creates a pool of compute nodes with the specified OS settings.
:param batch_service_client: A Batch service client.
:type batch_service_client: `azure.batch.BatchServiceClient`
:param str pool_id: An ID for the new pool.
:param str publisher: Marketplace image publisher
:param str offer: Marketplace image offer
:param str sku: Marketplace image sku
"""
# Create a new pool of Linux compute nodes using an Azure Virtual Machines
# Marketplace image. For more information about creating pools of Linux
# nodes, see:
# https://azure.microsoft.com/documentation/articles/batch-linux-nodes/
image_ref_to_use = models.ImageReference(
publisher="microsoft-azure-batch",
offer="ubuntu-server-container",
sku="16-04-lts",
version="latest",
)
if config.REGISTRY_USERNAME:
registry = batch.models.ContainerRegistry(
user_name=config.REGISTRY_USERNAME,
password=config.REGISTRY_PASSWORD,
registry_server=config.REGISTRY_SERVER,
)
container_conf = batch.models.ContainerConfiguration(
container_image_names=[config.DOCKER_CONTAINER],
container_registries=[registry],
)
else:
container_conf = batch.models.ContainerConfiguration(
container_image_names=[config.DOCKER_CONTAINER]
)
new_pool = batch.models.PoolAddParameter(
id=config.POOL_ID,
virtual_machine_configuration=batch.models.VirtualMachineConfiguration(
image_reference=image_ref_to_use,
container_configuration=container_conf,
node_agent_sku_id="batch.node.ubuntu 16.04",
),
vm_size=config.POOL_VM_SIZE,
target_dedicated_nodes=config.POOL_NODE_COUNT,
target_low_priority_nodes=config.POOL_LOW_PRIORITY_NODE_COUNT,
)
batch_service_client.pool.add(new_pool)
def create_job(batch_service_client, job_id, pool_id):
"""
Creates a job with the specified ID, associated with the specified pool.
:param batch_service_client: A Batch service client.
:type batch_service_client: `azure.batch.BatchServiceClient`
:param str job_id: The ID for the job.
:param str pool_id: The ID for the pool.
"""
print("Creating job [{}]...".format(job_id))
job_description = batch.models.JobAddParameter(
id=job_id, pool_info=batch.models.PoolInformation(pool_id=pool_id)
)
batch_service_client.job.add(job_description)
def add_tasks(
config,
_blob_client,
batch_service_client,
container_sas_url,
job_id,
_input_file,
count,
):
"""
Adds a task for each input file in the collection to the specified job.
:param batch_service_client: A Batch service client.
:type batch_service_client: `azure.batch.BatchServiceClient`
:param str job_id: The ID of the job to which to add the tasks.
:param list input_files: The input files
:param output_container_sas_token: A SAS token granting write access to
the specified Azure Blob storage container.
"""
print("Adding {} tasks to job [{}]...".format(count, job_id))
tasks = list()
for fold_number in range(count):
output_file = build_output_file(container_sas_url, fold_number)
# command_line = '/bin/bash -c \'echo "Hello World" && echo "hello: world" > output.yaml\''
command_line = "/bin/bash -c 'stt {} {} {}'".format(
_CONTAINER_INPUT_FILE, _CONTAINER_OUTPUT_FILE, fold_number
)
task_container_settings = models.TaskContainerSettings(
image_name=config.DOCKER_CONTAINER
)
tasks.append(
batch.models.TaskAddParameter(
id="Task_{}".format(fold_number),
command_line=command_line,
resource_files=[_input_file],
output_files=[output_file],
container_settings=task_container_settings,
)
)
batch_service_client.task.add_collection(job_id, tasks)
def wait_for_tasks_to_complete(batch_service_client, job_id, timeout):
"""
Returns when all tasks in the specified job reach the Completed state.
:param batch_service_client: A Batch service client.
:type batch_service_client: `azure.batch.BatchServiceClient`
:param str job_id: The id of the job whose tasks should be to monitored.
:param timedelta timeout: The duration to wait for task completion. If all
tasks in the specified job do not reach Completed state within this time
period, an exception will be raised.
"""
_start_time = datetime.datetime.now()
timeout_expiration = _start_time + timeout
# print( "Monitoring all tasks for 'Completed' state, timeout in {}...".format(timeout), end="",)
while datetime.datetime.now() < timeout_expiration:
sys.stdout.flush()
tasks = [t for t in batch_service_client.task.list(job_id)]
incomplete_tasks = [
task for task in tasks if task.state != models.TaskState.completed
]
hours, remainder = divmod((datetime.datetime.now() - _start_time).seconds, 3600)
minutes, seconds = divmod(remainder, 60)
print_progress(
len(tasks) - len(incomplete_tasks),
len(tasks),
prefix="Time elapsed {:02}:{:02}:{:02}".format(
int(hours), int(minutes), int(seconds)
),
decimals=1,
bar_length=min(len(tasks), 50),
)
error_codes = [t.execution_info.exit_code for t in tasks if t.execution_info and t.execution_info.exit_code ]
if error_codes:
codes = defaultdict(lambda : 0)
for cd in error_codes:
codes[cd] +=1
# import pdb; pdb.set_trace()
raise RuntimeError( "\nSome tasks have exited with a non-zero exit code including: " + ", ".join([ "{}({})".format(k,v) for k, v in codes.items() ] ))
if not incomplete_tasks:
print()
return True
time.sleep(1)
print()
raise RuntimeError(
"ERROR: Tasks did not reach 'Completed' state within "
"timeout period of " + str(timeout)
)
def print_task_output(batch_service_client, job_id, encoding=None):
"""Prints the stdout.txt file for each task in the job.
:param batch_client: The batch client to use.
:type batch_client: `batchserviceclient.BatchServiceClient`
:param str job_id: The id of the job with task output files to print.
"""
print("Printing task output...")
tasks = batch_service_client.task.list(job_id)
for task in tasks:
node_id = batch_service_client.task.get(job_id, task.id).node_info.node_id
print("Task: {}".format(task.id))
print("Node: {}".format(node_id))
stream = batch_service_client.file.get_from_task(
job_id, task.id, _STANDARD_OUT_FILE_NAME
)
file_text = _read_stream_as_string(stream, encoding)
print("Standard output:")
print(file_text)
def _read_stream_as_string(stream, encoding):
"""Read stream as string
:param stream: input stream generator
:param str encoding: The encoding of the file. The default is utf-8.
:return: The file content.
:rtype: str
"""
output = io.BytesIO()
try:
for data in stream:
output.write(data)
if encoding is None:
encoding = "utf-8"
return output.getvalue().decode(encoding)
finally:
output.close()
raise RuntimeError("could not write data to stream or decode bytes")
def _download_files(config, _blob_client, out_path, count):
pathlib.Path(config.BATCH_DIRECTORY).mkdir(parents=True, exist_ok=True)
blob_names = [b.name for b in _blob_client.list_blobs(config.CONTAINER_NAME)]
for i in range(count):
blob_name = FOLD_FILE_PATTERN.format(i)
if not blob_name in blob_names:
raise RuntimeError("incomplete blob set: missing blob {}".format(blob_name))
out_path = os.path.join(config.BATCH_DIRECTORY, blob_name)
_blob_client.get_blob_to_path(config.CONTAINER_NAME, blob_name, out_path)
def _download_results(config, _blob_client, out_path, count, ptrn=FOLD_FILE_PATTERN):
pathlib.Path(config.BATCH_DIRECTORY).mkdir(parents=True, exist_ok=True)
blob_names = [b.name for b in _blob_client.list_blobs(config.CONTAINER_NAME)]
results = []
for i in range(count):
blob_name = ptrn.format(i)
if not blob_name in blob_names:
raise RuntimeError("incomplete blob set: missing blob {}".format(blob_name))
out_path = os.path.join(config.BATCH_DIRECTORY, blob_name)
        # download to disk, then parse the YAML result for this fold
        _blob_client.get_blob_to_path(config.CONTAINER_NAME, blob_name, out_path)
        with open(out_path, "r") as stream:
            results.append(load(stream, Loader=Loader))
return results
def run(config: BatchConfig, wait=True) -> None:
r"""
:param config: A :class:`BatchConfig` instance with the Azure Batch run parameters
:type config: :class:BatchConfig
:param boolean wait: If true, wait for the batch to complete and then
download the results to file
:raises BatchErrorException: If raised by the Azure Batch Python SDK
"""
# pylint: disable=too-many-locals
# replace any missing values in the configuration with environment variables
config = validate_config(config)
start_time = datetime.datetime.now().replace(microsecond=0)
print(
'Synthetic Controls Run "{}" start time: {}'.format(config.JOB_ID, start_time)
)
print()
_LOCAL_INPUT_FILE = os.path.join(config.BATCH_DIRECTORY, _BATCH_CV_FILE_NAME)
v_pen, w_pen, model_data = get_config(_LOCAL_INPUT_FILE)
n_folds = len(model_data["folds"]) * len(v_pen) * len(w_pen)
# Create the blob client, for use in obtaining references to
# blob storage containers and uploading files to containers.
blob_client = azureblob.BlockBlobService(
account_name=config.STORAGE_ACCOUNT_NAME, account_key=config.STORAGE_ACCOUNT_KEY
)
# Use the blob client to create the containers in Azure Storage if they
# don't yet exist.
blob_client.create_container(config.CONTAINER_NAME, fail_on_exist=False)
CONTAINER_SAS_URL = build_output_sas_url(config, blob_client)
# The collection of data files that are to be processed by the tasks.
input_file_path = os.path.join(sys.path[0], _LOCAL_INPUT_FILE)
# Upload the data files.
input_file = upload_file_to_container(
blob_client, config.CONTAINER_NAME, input_file_path, config.STORAGE_ACCESS_DURATION_HRS
)
# Create a Batch service client. We'll now be interacting with the Batch
# service in addition to Storage
credentials = batch_auth.SharedKeyCredentials(
config.BATCH_ACCOUNT_NAME, config.BATCH_ACCOUNT_KEY
)
batch_client = batch.BatchServiceClient(
credentials, batch_url=config.BATCH_ACCOUNT_URL
)
try:
# Create the pool that will contain the compute nodes that will execute the
# tasks.
try:
create_pool(config, batch_client)
print("Created pool: ", config.POOL_ID)
except models.BatchErrorException:
print("Using pool: ", config.POOL_ID)
# Create the job that will run the tasks.
create_job(batch_client, config.JOB_ID, config.POOL_ID)
# Add the tasks to the job.
add_tasks(
config,
blob_client,
batch_client,
CONTAINER_SAS_URL,
config.JOB_ID,
input_file,
n_folds,
)
if not wait:
return
# Pause execution until tasks reach Completed state.
wait_for_tasks_to_complete(
batch_client, config.JOB_ID, datetime.timedelta(hours=config.STORAGE_ACCESS_DURATION_HRS)
)
_download_files(config, blob_client, config.BATCH_DIRECTORY, n_folds)
except models.BatchErrorException as err:
print_batch_exception(err)
raise err
# Clean up storage resources
# TODO: re-enable this and delete the output container too
# -- print("Deleting container [{}]...".format(input_container_name))
# -- blob_client.delete_container(input_container_name)
# Print out some timing info
end_time = datetime.datetime.now().replace(microsecond=0)
print()
print("Sample end: {}".format(end_time))
print("Elapsed time: {}".format(end_time - start_time))
print()
# Clean up Batch resources (if the user so chooses).
if config.DELETE_POOL_WHEN_DONE:
batch_client.pool.delete(config.POOL_ID)
if config.DELETE_JOB_WHEN_DONE:
batch_client.job.delete(config.JOB_ID)
def load_results(config: BatchConfig) -> None:
r"""
:param config: A :class:`BatchConfig` instance with the Azure Batch run parameters
:type config: :class:BatchConfig
:raises BatchErrorException: If raised by the Azure Batch Python SDK
"""
# pylint: disable=too-many-locals
# replace any missing values in the configuration with environment variables
config = validate_config(config)
start_time = datetime.datetime.now().replace(microsecond=0)
print('Load result for job "{}" start time: {}'.format(config.JOB_ID, start_time))
print()
_LOCAL_INPUT_FILE = os.path.join(config.BATCH_DIRECTORY, _BATCH_CV_FILE_NAME)
v_pen, w_pen, model_data = get_config(_LOCAL_INPUT_FILE)
n_folds = len(model_data["folds"]) * len(v_pen) * len(w_pen)
# Create the blob client, for use in obtaining references to
# blob storage containers and uploading files to containers.
blob_client = azureblob.BlockBlobService(
account_name=config.STORAGE_ACCOUNT_NAME, account_key=config.STORAGE_ACCOUNT_KEY
)
# Create a Batch service client. We'll now be interacting with the Batch
# service in addition to Storage
credentials = batch_auth.SharedKeyCredentials(
config.BATCH_ACCOUNT_NAME, config.BATCH_ACCOUNT_KEY
)
batch_client = batch.BatchServiceClient(
credentials, batch_url=config.BATCH_ACCOUNT_URL
)
try:
# Pause execution until tasks reach Completed state.
wait_for_tasks_to_complete(
batch_client, config.JOB_ID, datetime.timedelta(hours=config.STORAGE_ACCESS_DURATION_HRS)
)
_download_files(config, blob_client, config.BATCH_DIRECTORY, n_folds)
except models.BatchErrorException as err:
print_batch_exception(err)
raise err
# Clean up storage resources
# TODO: re-enable this and delete the output container too
# -- print("Deleting container [{}]...".format(input_container_name))
# -- blob_client.delete_container(input_container_name)
# Print out some timing info
end_time = datetime.datetime.now().replace(microsecond=0)
print()
print("Sample end: {}".format(end_time))
print("Elapsed time: {}".format(end_time - start_time))
print()
# Clean up Batch resources (if the user so chooses).
if config.DELETE_POOL_WHEN_DONE:
batch_client.pool.delete(config.POOL_ID)
if config.DELETE_JOB_WHEN_DONE:
batch_client.job.delete(config.JOB_ID)
if __name__ == "__main__":
# TODO: this is not an ideal API
config_module = importlib.__import__("config")
run(config_module.config)
| en | 0.671033 | usage requires these additional modules pip install azure-batch azure-storage-blob jsonschema pyyaml && pip install git+https://github.com/microsoft/SparseSC.git@ad4bf27edb28f517508f6934f21eb65d17fb6543 && scgrad start usage: from SparseSC import fit, aggregate_batch_results from SparseSC.utils.azure_batch_client import BatchConfig, run _TIMESTAMP = datetime.utcnow().strftime("%Y%m%d%H%M%S") BATCH_DIR= "path/to/my/batch_config/" fit(x=x,..., batchDir=BATCH_DIR) my_config = BatchConfig( BATCH_ACCOUNT_NAME="MySecret", BATCH_ACCOUNT_KEY="MySecret", BATCH_ACCOUNT_URL="MySecret", STORAGE_ACCOUNT_NAME="MySecret", STORAGE_ACCOUNT_KEY="MySecret", POOL_ID="my-compute-pool", POOL_NODE_COUNT=0, POOL_LOW_PRIORITY_NODE_COUNT=20, POOL_VM_SIZE="STANDARD_A1_v2", DELETE_POOL_WHEN_DONE=False, JOB_ID="my-job" + _TIMESTAMP, DELETE_JOB_WHEN_DONE=False, CONTAINER_NAME="my-blob-container", BATCH_DIRECTORY=BATCH_DIR, ) run(my_config) fitted_model = aggregate_batch_results("path/to/my/batch_config") # pylint: disable=differing-type-doc, differing-param-doc, missing-param-doc, missing-raises-doc, missing-return-doc # pylint: disable=bad-continuation, invalid-name, protected-access, line-too-long, fixme # Update the Batch and Storage account credential strings in config.py with values # unique to your accounts. These are used when constructing connection strings # for the Batch and Storage client objects. build a sas token for the output container Prints the contents of the specified Batch exception. :param batch_exception: Uploads a local file to an Azure Blob storage container. :rtype: `azure.batch.models.ResourceFile` :return: A ResourceFile initialized with a SAS URL appropriate for Batch tasks. # where to store the outputs # under what conditions should you attempt to extract the outputs? # https://docs.microsoft.com/en-us/azure/batch/batch-task-output-files#specify-output-files-for-task-output Uploads a local file to an Azure Blob storage container. :param block_blob_client: A blob service client. :type block_blob_client: `azure.storage.blob.BlockBlobService` :param str container_name: The name of the Azure Blob storage container. :param str file_path: The local path to the file. :rtype: `azure.batch.models.ResourceFile` :return: A ResourceFile initialized with a SAS URL appropriate for Batch tasks. Creates a pool of compute nodes with the specified OS settings. :param batch_service_client: A Batch service client. :type batch_service_client: `azure.batch.BatchServiceClient` :param str pool_id: An ID for the new pool. :param str publisher: Marketplace image publisher :param str offer: Marketplace image offer :param str sku: Marketplace image sku # Create a new pool of Linux compute nodes using an Azure Virtual Machines # Marketplace image. For more information about creating pools of Linux # nodes, see: # https://azure.microsoft.com/documentation/articles/batch-linux-nodes/ Creates a job with the specified ID, associated with the specified pool. :param batch_service_client: A Batch service client. :type batch_service_client: `azure.batch.BatchServiceClient` :param str job_id: The ID for the job. :param str pool_id: The ID for the pool. Adds a task for each input file in the collection to the specified job. :param batch_service_client: A Batch service client. :type batch_service_client: `azure.batch.BatchServiceClient` :param str job_id: The ID of the job to which to add the tasks. 
:param list input_files: The input files :param output_container_sas_token: A SAS token granting write access to the specified Azure Blob storage container. # command_line = '/bin/bash -c \'echo "Hello World" && echo "hello: world" > output.yaml\'' Returns when all tasks in the specified job reach the Completed state. :param batch_service_client: A Batch service client. :type batch_service_client: `azure.batch.BatchServiceClient` :param str job_id: The id of the job whose tasks should be to monitored. :param timedelta timeout: The duration to wait for task completion. If all tasks in the specified job do not reach Completed state within this time period, an exception will be raised. # print( "Monitoring all tasks for 'Completed' state, timeout in {}...".format(timeout), end="",) # import pdb; pdb.set_trace() Prints the stdout.txt file for each task in the job. :param batch_client: The batch client to use. :type batch_client: `batchserviceclient.BatchServiceClient` :param str job_id: The id of the job with task output files to print. Read stream as string :param stream: input stream generator :param str encoding: The encoding of the file. The default is utf-8. :return: The file content. :rtype: str :param config: A :class:`BatchConfig` instance with the Azure Batch run parameters :type config: :class:BatchConfig :param boolean wait: If true, wait for the batch to complete and then download the results to file :raises BatchErrorException: If raised by the Azure Batch Python SDK # pylint: disable=too-many-locals # replace any missing values in the configuration with environment variables # Create the blob client, for use in obtaining references to # blob storage containers and uploading files to containers. # Use the blob client to create the containers in Azure Storage if they # don't yet exist. # The collection of data files that are to be processed by the tasks. # Upload the data files. # Create a Batch service client. We'll now be interacting with the Batch # service in addition to Storage # Create the pool that will contain the compute nodes that will execute the # tasks. # Create the job that will run the tasks. # Add the tasks to the job. # Pause execution until tasks reach Completed state. # Clean up storage resources # TODO: re-enable this and delete the output container too # -- print("Deleting container [{}]...".format(input_container_name)) # -- blob_client.delete_container(input_container_name) # Print out some timing info # Clean up Batch resources (if the user so chooses). :param config: A :class:`BatchConfig` instance with the Azure Batch run parameters :type config: :class:BatchConfig :raises BatchErrorException: If raised by the Azure Batch Python SDK # pylint: disable=too-many-locals # replace any missing values in the configuration with environment variables # Create the blob client, for use in obtaining references to # blob storage containers and uploading files to containers. # Create a Batch service client. We'll now be interacting with the Batch # service in addition to Storage # Pause execution until tasks reach Completed state. # Clean up storage resources # TODO: re-enable this and delete the output container too # -- print("Deleting container [{}]...".format(input_container_name)) # -- blob_client.delete_container(input_container_name) # Print out some timing info # Clean up Batch resources (if the user so chooses). # TODO: this is not an ideal API | 2.170586 | 2 |
src/vilbert/datasets/__init__.py | NoOneUST/COMP5212 | 3 | 10468 | <reponame>NoOneUST/COMP5212
from .visual_entailment_dataset import VisualEntailmentDataset
| from .visual_entailment_dataset import VisualEntailmentDataset | none | 1 | 1.014172 | 1 |
|
Dungeoneer/Treasure.py | jameslemon81/Dungeoneer | 12 | 10469 | <filename>Dungeoneer/Treasure.py
# Basic Fantasy RPG Dungeoneer Suite
# Copyright 2007-2012 <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of the author nor the names of any contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
# Treasure.py -- generate treasures for Basic Fantasy RPG
###############################################################################
import Gems, Art, Coins, Magic, Unknown
import Dice
import string
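# combine(): merge stacks of identical treasure items (same category, name, and unit
# value) into a single entry with a summed quantity, repeating passes until no further
# merges occur.  (Descriptive comment added; behaviour inferred from the code below.)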
def combine(lst):
lst.sort()
hits = 1
while hits:
hits = 0
for i in range(len(lst) - 1):
if lst[i] is not None and lst[i+1] is not None:
if lst[i].cat == lst[i+1].cat \
and lst[i].name == lst[i+1].name \
and lst[i].value == lst[i+1].value:
lst[i].qty += lst[i+1].qty
lst[i+1] = None
hits += 1
if hits:
lst = filter(lambda x: x is not None, lst)
return lst
def _gen_coins(argtup):
kind, n, s, b, mul = argtup
return [ Coins.Coin(kind, (Dice.D(n, s, b) * mul)) ]
def _gen_gems(argtup):
n, s, b, mul = argtup
lst = []
qty = Dice.D(n, s, b) * mul
for i in range(qty):
lst = lst + [ Gems.Gem() ]
return lst
def _gen_art(argtup):
n, s, b, mul = argtup
lst = []
qty = Dice.D(n, s, b) * mul
for i in range(qty):
lst = lst + [ Art.Art() ]
return lst
def __gen_magic(argtup):
kind, n, s, b, mul = argtup
lst = []
qty = Dice.D(n, s, b) * mul
for i in range(qty):
lst = lst + [ Magic.Magic(kind) ]
return lst
def _gen_magic(argtup):
if type(argtup) is type([]):
lst = []
for i in argtup:
lst = lst + __gen_magic(i)
return lst
else:
return __gen_magic(argtup)
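# Note (added for clarity; format inferred from the generator functions above): each
# entry in the treasure table below is a tuple of
#     (percent chance, generator function, generator arguments)
# e.g. (50, _gen_coins, ("cp", 5, 6, 0, 100)) reads as "50% chance of 5d6 x 100 copper pieces".
# A minimal sketch of rolling a single entry (hypothetical helper, not part of the
# original module) might look like:
def _roll_entry(entry):
    chance, generator, args = entry
    # Dice.D(n, sides, bonus) follows the same convention used by the generators above
    if Dice.D(1, 100, 0) <= chance:
        return generator(args)
    return []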
_treasure_table = {
# lair treasure
'A': [
(50, _gen_coins, ("cp", 5, 6, 0, 100)),
(60, _gen_coins, ("sp", 5, 6, 0, 100)),
(40, _gen_coins, ("ep", 5, 4, 0, 100)),
(70, _gen_coins, ("gp", 10, 6, 0, 100)),
(50, _gen_coins, ("pp", 1, 10, 0, 100)),
(50, _gen_gems, (6, 6, 0, 1)),
(50, _gen_art, (6, 6, 0, 1)),
(30, _gen_magic, ("Any", 0, 0, 3, 1)),
],
'B': [
(75, _gen_coins, ("cp", 5, 10, 0, 100)),
(50, _gen_coins, ("sp", 5, 6, 0, 100)),
(50, _gen_coins, ("ep", 5, 4, 0, 100)),
(50, _gen_coins, ("gp", 3, 6, 0, 100)),
(25, _gen_gems, (1, 6, 0, 1)),
(25, _gen_art, (1, 6, 0, 1)),
(10, _gen_magic, ("AW", 0, 0, 1, 1)),
],
'C': [
(60, _gen_coins, ("cp", 6, 6, 0, 100)),
(60, _gen_coins, ("sp", 5, 4, 0, 100)),
(30, _gen_coins, ("ep", 2, 6, 0, 100)),
(25, _gen_gems, (1, 4, 0, 1)),
(25, _gen_art, (1, 4, 0, 1)),
(15, _gen_magic, ("Any", 1, 2, 0, 1)),
],
'D': [
(30, _gen_coins, ("cp", 4, 6, 0, 100)),
(45, _gen_coins, ("sp", 6, 6, 0, 100)),
(90, _gen_coins, ("gp", 5, 8, 0, 100)),
(30, _gen_gems, (1, 8, 0, 1)),
(30, _gen_art, (1, 8, 0, 1)),
(20, _gen_magic, [
("Any", 1, 2, 0, 1),
("Potion", 0, 0, 1, 1),
]
),
],
'E': [
(30, _gen_coins, ("cp", 2, 8, 0, 100)),
(60, _gen_coins, ("sp", 6, 10, 0, 100)),
(50, _gen_coins, ("ep", 3, 8, 0, 100)),
(50, _gen_coins, ("gp", 4, 10, 0, 100)),
(10, _gen_gems, (1, 10, 0, 1)),
(10, _gen_art, (1, 10, 0, 1)),
(30, _gen_magic, [
("Any", 1, 4, 0, 1),
("Scroll", 0, 0, 1, 1),
]
),
],
'F': [
(40, _gen_coins, ("sp", 3, 8, 0, 100)),
(50, _gen_coins, ("ep", 4, 8, 0, 100)),
(85, _gen_coins, ("gp", 6, 10, 0, 100)),
(70, _gen_coins, ("pp", 2, 8, 0, 100)),
(20, _gen_gems, (2, 12, 0, 1)),
(20, _gen_art, (1, 12, 0, 1)),
(35, _gen_magic, [
("Non-Weapon", 1, 4, 0, 1),
("Scroll", 0, 0, 1, 1),
("Potion", 0, 0, 1, 1),
]
),
],
'G': [
(90, _gen_coins, ("gp", 4, 6, 0, 1000)),
(75, _gen_coins, ("pp", 5, 8, 0, 100)),
(25, _gen_gems, (3, 6, 0, 1)),
(25, _gen_art, (1, 10, 0, 1)),
(50, _gen_magic, [
("Any", 1, 4, 0, 1),
("Scroll", 0, 0, 1, 1),
]
),
],
'H': [
(75, _gen_coins, ("cp", 8, 10, 0, 100)),
(75, _gen_coins, ("sp", 6, 10, 0, 1000)),
(75, _gen_coins, ("ep", 3, 10, 0, 1000)),
(75, _gen_coins, ("gp", 5, 8, 0, 1000)),
(75, _gen_coins, ("pp", 9, 8, 0, 100)),
(50, _gen_gems, ( 1, 100, 0, 1)),
(50, _gen_art, (10, 4, 0, 1)),
(20, _gen_magic, [
("Any", 1, 4, 0, 1),
("Scroll", 0, 0, 1, 1),
("Potion", 0, 0, 1, 1),
]
),
],
'I': [
(80, _gen_coins, ("pp", 3, 10, 0, 100)),
(50, _gen_gems, (2, 6, 0, 1)),
(50, _gen_art, (2, 6, 0, 1)),
(15, _gen_magic, ("Any", 0, 0, 1, 1)),
],
'J': [
(45, _gen_coins, ("cp", 3, 8, 0, 100)),
(45, _gen_coins, ("sp", 1, 8, 0, 100)),
],
'K': [
(90, _gen_coins, ("cp", 2, 10, 0, 100)),
(35, _gen_coins, ("sp", 1, 8, 0, 100)),
],
'L': [
(50, _gen_gems, (1, 4, 0, 1)),
],
'M': [
(90, _gen_coins, ("gp", 4, 10, 0, 100)),
(90, _gen_coins, ("pp", 2, 8, 0, 1000)),
],
'N': [
(40, _gen_magic, ("Potion", 2, 4, 0, 1)),
],
'O': [
(50, _gen_magic, ("Scroll", 1, 4, 0, 1)),
],
# personal treasure
'P': [
(100, _gen_coins, ("cp", 3, 8, 0, 1)),
],
'Q': [
(100, _gen_coins, ("sp", 3, 6, 0, 1)),
],
'R': [
(100, _gen_coins, ("ep", 2, 6, 0, 1)),
],
'S': [
(100, _gen_coins, ("gp", 2, 4, 0, 1)),
],
'T': [
(100, _gen_coins, ("pp", 1, 6, 0, 1)),
],
'U': [
( 50, _gen_coins, ("cp", 1, 20, 0, 1)),
( 50, _gen_coins, ("sp", 1, 20, 0, 1)),
( 25, _gen_coins, ("gp", 1, 20, 0, 1)),
( 5, _gen_gems, (1, 4, 0, 1)),
( 5, _gen_art, (1, 4, 0, 1)),
( 2, _gen_magic, ("Any", 0, 0, 1, 1)),
],
'V': [
( 25, _gen_coins, ("sp", 1, 20, 0, 1)),
( 25, _gen_coins, ("ep", 1, 20, 0, 1)),
( 50, _gen_coins, ("gp", 1, 20, 0, 1)),
( 25, _gen_coins, ("pp", 1, 20, 0, 1)),
( 10, _gen_gems, (1, 4, 0, 1)),
( 10, _gen_art, (1, 4, 0, 1)),
( 5, _gen_magic, ("Any", 0, 0, 1, 1)),
],
'U1': [
( 75, _gen_coins, ("cp", 1, 8, 0, 100)),
( 50, _gen_coins, ("sp", 1, 6, 0, 100)),
( 25, _gen_coins, ("ep", 1, 4, 0, 100)),
( 7, _gen_coins, ("gp", 1, 4, 0, 100)),
( 1, _gen_coins, ("pp", 1, 4, 0, 100)),
( 7, _gen_gems, (1, 4, 0, 1)),
( 3, _gen_art, (1, 4, 0, 1)),
( 2, _gen_magic, ("Any", 0, 0, 1, 1)),
],
'U2': [
( 50, _gen_coins, ("cp", 1, 10, 0, 100)),
( 50, _gen_coins, ("sp", 1, 8, 0, 100)),
( 25, _gen_coins, ("ep", 1, 6, 0, 100)),
( 20, _gen_coins, ("gp", 1, 6, 0, 100)),
( 2, _gen_coins, ("pp", 1, 4, 0, 100)),
( 10, _gen_gems, (1, 6, 0, 1)),
( 7, _gen_art, (1, 4, 0, 1)),
( 5, _gen_magic, ("Any", 0, 0, 1, 1)),
],
'U3': [
( 30, _gen_coins, ("cp", 2, 6, 0, 100)),
( 50, _gen_coins, ("sp", 1, 10, 0, 100)),
( 25, _gen_coins, ("ep", 1, 8, 0, 100)),
( 50, _gen_coins, ("gp", 1, 6, 0, 100)),
( 4, _gen_coins, ("pp", 1, 4, 0, 100)),
( 15, _gen_gems, (1, 6, 0, 1)),
( 7, _gen_art, (1, 6, 0, 1)),
( 8, _gen_magic, ("Any", 0, 0, 1, 1)),
],
'U45': [
( 20, _gen_coins, ("cp", 3, 6, 0, 100)),
( 50, _gen_coins, ("sp", 2, 6, 0, 100)),
( 25, _gen_coins, ("ep", 1, 10, 0, 100)),
( 50, _gen_coins, ("gp", 2, 6, 0, 100)),
( 8, _gen_coins, ("pp", 1, 4, 0, 100)),
( 20, _gen_gems, (1, 8, 0, 1)),
( 10, _gen_art, (1, 6, 0, 1)),
( 12, _gen_magic, ("Any", 0, 0, 1, 1)),
],
'U67': [
( 15, _gen_coins, ("cp", 4, 6, 0, 100)),
( 50, _gen_coins, ("sp", 3, 6, 0, 100)),
( 25, _gen_coins, ("ep", 1, 12, 0, 100)),
( 70, _gen_coins, ("gp", 2, 8, 0, 100)),
( 15, _gen_coins, ("pp", 1, 4, 0, 100)),
( 30, _gen_gems, (1, 8, 0, 1)),
( 15, _gen_art, (1, 6, 0, 1)),
( 16, _gen_magic, ("Any", 0, 0, 1, 1)),
],
'U8': [
( 10, _gen_coins, ("cp", 5, 6, 0, 100)),
( 50, _gen_coins, ("sp", 5, 6, 0, 100)),
( 25, _gen_coins, ("ep", 2, 8, 0, 100)),
( 75, _gen_coins, ("gp", 4, 6, 0, 100)),
( 30, _gen_coins, ("pp", 1, 4, 0, 100)),
( 40, _gen_gems, (1, 8, 0, 1)),
( 30, _gen_art, (1, 8, 0, 1)),
( 20, _gen_magic, ("Any", 0, 0, 1, 1)),
],
# coinage
'cp': [
(100, _gen_coins, ("cp", 0, 0, 1, 1)),
],
'sp': [
(100, _gen_coins, ("sp", 0, 0, 1, 1)),
],
'ep': [
(100, _gen_coins, ("ep", 0, 0, 1, 1)),
],
'gp': [
(100, _gen_coins, ("gp", 0, 0, 1, 1)),
],
'pp': [
(100, _gen_coins, ("pp", 0, 0, 1, 1)),
],
# magic classes
'MAGIC': [ (100, _gen_magic, ("Any", 0, 0, 1, 1)), ],
'POTION': [ (100, _gen_magic, ("Potion", 0, 0, 1, 1)), ],
'SCROLL': [ (100, _gen_magic, ("Scroll", 0, 0, 1, 1)), ],
'RING': [ (100, _gen_magic, ("Ring", 0, 0, 1, 1)), ],
'WSR': [ (100, _gen_magic, ("WSR", 0, 0, 1, 1)), ],
'MISC': [ (100, _gen_magic, ("Misc", 0, 0, 1, 1)), ],
'ARMOR': [ (100, _gen_magic, ("Armor", 0, 0, 1, 1)), ],
'WEAPON': [ (100, _gen_magic, ("Weapon", 0, 0, 1, 1)), ],
}
_treasure_table['U4'] = _treasure_table['U45']
_treasure_table['U5'] = _treasure_table['U45']
_treasure_table['U6'] = _treasure_table['U67']
_treasure_table['U7'] = _treasure_table['U67']
def Types():
types = _treasure_table.keys()
ones = filter(lambda x: len(x) == 1, types)
mults = filter(lambda x: len(x) > 1, types)
ones.sort()
mults.sort()
return ones + mults
def Treasure(typ):
tr = []
try:
tbl = _treasure_table[string.upper(typ)]
for i in tbl:
if Dice.D(1, 100, 0) <= i[0]:
tr = tr + i[1](i[2])
except:
tr = [ Unknown.Unknown(typ) ]
return tr
def Factory(args):
types = []
tr = []
mult = 1
for i in args:
if type(i) is tuple:
i = Dice.D(*i)
try:
nmult = int(i)
mult = nmult
types.append("%d" % mult)
continue
except:
pass
types.append(i + ",")
for n in range(mult):
tr += Treasure(i)
types = string.join(types, " ")
if types[-1] == ',':
types = types[:-1]
return (types.upper(), combine(tr))
if __name__ == "__main__":
import sys
if len(sys.argv) < 2:
print "Usage: Treasure.py treasuretype [ treasuretype ... ]"
sys.exit(0)
types, tr = Factory(sys.argv[1:])
print "Treasure Type " + string.upper(types)
vtot = 0.0
ocat = ''
qty_len = 1
for t in tr:
qty_len = max(len(str(t.qty)), qty_len)
qty_fmt = "%" + str(qty_len) + "d"
for t in tr:
if t.cat != ocat:
print t.cat
ocat = t.cat
if t.value != 0:
print " ", qty_fmt % t.qty, t.name, t.value, "GP ea.", \
t.value * t.qty, "GP total"
else:
print " ", qty_fmt % t.qty, t.name
for i in t.desc:
print " ", i
vtot = vtot + (t.qty * t.value)
print "----- Total Value", vtot, "GP\n"
# end of script.
| <filename>Dungeoneer/Treasure.py
# Basic Fantasy RPG Dungeoneer Suite
# Copyright 2007-2012 <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above copyright
# notice, self list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, self list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of the author nor the names of any contributors
# may be used to endorse or promote products derived from self software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
# Treasure.py -- generate treasures for Basic Fantasy RPG
###############################################################################
import Gems, Art, Coins, Magic, Unknown
import Dice
import string
def combine(lst):
lst.sort()
hits = 1
while hits:
hits = 0
for i in range(len(lst) - 1):
if lst[i] is not None and lst[i+1] is not None:
if lst[i].cat == lst[i+1].cat \
and lst[i].name == lst[i+1].name \
and lst[i].value == lst[i+1].value:
lst[i].qty += lst[i+1].qty
lst[i+1] = None
hits += 1
if hits:
lst = filter(lambda x: x is not None, lst)
return lst
def _gen_coins(argtup):
kind, n, s, b, mul = argtup
return [ Coins.Coin(kind, (Dice.D(n, s, b) * mul)) ]
def _gen_gems(argtup):
n, s, b, mul = argtup
lst = []
qty = Dice.D(n, s, b) * mul
for i in range(qty):
lst = lst + [ Gems.Gem() ]
return lst
def _gen_art(argtup):
n, s, b, mul = argtup
lst = []
qty = Dice.D(n, s, b) * mul
for i in range(qty):
lst = lst + [ Art.Art() ]
return lst
def __gen_magic(argtup):
kind, n, s, b, mul = argtup
lst = []
qty = Dice.D(n, s, b) * mul
for i in range(qty):
lst = lst + [ Magic.Magic(kind) ]
return lst
def _gen_magic(argtup):
if type(argtup) is type([]):
lst = []
for i in argtup:
lst = lst + __gen_magic(i)
return lst
else:
return __gen_magic(argtup)
_treasure_table = {
# lair treasure
'A': [
(50, _gen_coins, ("cp", 5, 6, 0, 100)),
(60, _gen_coins, ("sp", 5, 6, 0, 100)),
(40, _gen_coins, ("ep", 5, 4, 0, 100)),
(70, _gen_coins, ("gp", 10, 6, 0, 100)),
(50, _gen_coins, ("pp", 1, 10, 0, 100)),
(50, _gen_gems, (6, 6, 0, 1)),
(50, _gen_art, (6, 6, 0, 1)),
(30, _gen_magic, ("Any", 0, 0, 3, 1)),
],
'B': [
(75, _gen_coins, ("cp", 5, 10, 0, 100)),
(50, _gen_coins, ("sp", 5, 6, 0, 100)),
(50, _gen_coins, ("ep", 5, 4, 0, 100)),
(50, _gen_coins, ("gp", 3, 6, 0, 100)),
(25, _gen_gems, (1, 6, 0, 1)),
(25, _gen_art, (1, 6, 0, 1)),
(10, _gen_magic, ("AW", 0, 0, 1, 1)),
],
'C': [
(60, _gen_coins, ("cp", 6, 6, 0, 100)),
(60, _gen_coins, ("sp", 5, 4, 0, 100)),
(30, _gen_coins, ("ep", 2, 6, 0, 100)),
(25, _gen_gems, (1, 4, 0, 1)),
(25, _gen_art, (1, 4, 0, 1)),
(15, _gen_magic, ("Any", 1, 2, 0, 1)),
],
'D': [
(30, _gen_coins, ("cp", 4, 6, 0, 100)),
(45, _gen_coins, ("sp", 6, 6, 0, 100)),
(90, _gen_coins, ("gp", 5, 8, 0, 100)),
(30, _gen_gems, (1, 8, 0, 1)),
(30, _gen_art, (1, 8, 0, 1)),
(20, _gen_magic, [
("Any", 1, 2, 0, 1),
("Potion", 0, 0, 1, 1),
]
),
],
'E': [
(30, _gen_coins, ("cp", 2, 8, 0, 100)),
(60, _gen_coins, ("sp", 6, 10, 0, 100)),
(50, _gen_coins, ("ep", 3, 8, 0, 100)),
(50, _gen_coins, ("gp", 4, 10, 0, 100)),
(10, _gen_gems, (1, 10, 0, 1)),
(10, _gen_art, (1, 10, 0, 1)),
(30, _gen_magic, [
("Any", 1, 4, 0, 1),
("Scroll", 0, 0, 1, 1),
]
),
],
'F': [
(40, _gen_coins, ("sp", 3, 8, 0, 100)),
(50, _gen_coins, ("ep", 4, 8, 0, 100)),
(85, _gen_coins, ("gp", 6, 10, 0, 100)),
(70, _gen_coins, ("pp", 2, 8, 0, 100)),
(20, _gen_gems, (2, 12, 0, 1)),
(20, _gen_art, (1, 12, 0, 1)),
(35, _gen_magic, [
("Non-Weapon", 1, 4, 0, 1),
("Scroll", 0, 0, 1, 1),
("Potion", 0, 0, 1, 1),
]
),
],
'G': [
(90, _gen_coins, ("gp", 4, 6, 0, 1000)),
(75, _gen_coins, ("pp", 5, 8, 0, 100)),
(25, _gen_gems, (3, 6, 0, 1)),
(25, _gen_art, (1, 10, 0, 1)),
(50, _gen_magic, [
("Any", 1, 4, 0, 1),
("Scroll", 0, 0, 1, 1),
]
),
],
'H': [
(75, _gen_coins, ("cp", 8, 10, 0, 100)),
(75, _gen_coins, ("sp", 6, 10, 0, 1000)),
(75, _gen_coins, ("ep", 3, 10, 0, 1000)),
(75, _gen_coins, ("gp", 5, 8, 0, 1000)),
(75, _gen_coins, ("pp", 9, 8, 0, 100)),
(50, _gen_gems, ( 1, 100, 0, 1)),
(50, _gen_art, (10, 4, 0, 1)),
(20, _gen_magic, [
("Any", 1, 4, 0, 1),
("Scroll", 0, 0, 1, 1),
("Potion", 0, 0, 1, 1),
]
),
],
'I': [
(80, _gen_coins, ("pp", 3, 10, 0, 100)),
(50, _gen_gems, (2, 6, 0, 1)),
(50, _gen_art, (2, 6, 0, 1)),
(15, _gen_magic, ("Any", 0, 0, 1, 1)),
],
'J': [
(45, _gen_coins, ("cp", 3, 8, 0, 100)),
(45, _gen_coins, ("sp", 1, 8, 0, 100)),
],
'K': [
(90, _gen_coins, ("cp", 2, 10, 0, 100)),
(35, _gen_coins, ("sp", 1, 8, 0, 100)),
],
'L': [
(50, _gen_gems, (1, 4, 0, 1)),
],
'M': [
(90, _gen_coins, ("gp", 4, 10, 0, 100)),
(90, _gen_coins, ("pp", 2, 8, 0, 1000)),
],
'N': [
(40, _gen_magic, ("Potion", 2, 4, 0, 1)),
],
'O': [
(50, _gen_magic, ("Scroll", 1, 4, 0, 1)),
],
# personal treasure
'P': [
(100, _gen_coins, ("cp", 3, 8, 0, 1)),
],
'Q': [
(100, _gen_coins, ("sp", 3, 6, 0, 1)),
],
'R': [
(100, _gen_coins, ("ep", 2, 6, 0, 1)),
],
'S': [
(100, _gen_coins, ("gp", 2, 4, 0, 1)),
],
'T': [
(100, _gen_coins, ("pp", 1, 6, 0, 1)),
],
'U': [
( 50, _gen_coins, ("cp", 1, 20, 0, 1)),
( 50, _gen_coins, ("sp", 1, 20, 0, 1)),
( 25, _gen_coins, ("gp", 1, 20, 0, 1)),
( 5, _gen_gems, (1, 4, 0, 1)),
( 5, _gen_art, (1, 4, 0, 1)),
( 2, _gen_magic, ("Any", 0, 0, 1, 1)),
],
'V': [
( 25, _gen_coins, ("sp", 1, 20, 0, 1)),
( 25, _gen_coins, ("ep", 1, 20, 0, 1)),
( 50, _gen_coins, ("gp", 1, 20, 0, 1)),
( 25, _gen_coins, ("pp", 1, 20, 0, 1)),
( 10, _gen_gems, (1, 4, 0, 1)),
( 10, _gen_art, (1, 4, 0, 1)),
( 5, _gen_magic, ("Any", 0, 0, 1, 1)),
],
'U1': [
( 75, _gen_coins, ("cp", 1, 8, 0, 100)),
( 50, _gen_coins, ("sp", 1, 6, 0, 100)),
( 25, _gen_coins, ("ep", 1, 4, 0, 100)),
( 7, _gen_coins, ("gp", 1, 4, 0, 100)),
( 1, _gen_coins, ("pp", 1, 4, 0, 100)),
( 7, _gen_gems, (1, 4, 0, 1)),
( 3, _gen_art, (1, 4, 0, 1)),
( 2, _gen_magic, ("Any", 0, 0, 1, 1)),
],
'U2': [
( 50, _gen_coins, ("cp", 1, 10, 0, 100)),
( 50, _gen_coins, ("sp", 1, 8, 0, 100)),
( 25, _gen_coins, ("ep", 1, 6, 0, 100)),
( 20, _gen_coins, ("gp", 1, 6, 0, 100)),
( 2, _gen_coins, ("pp", 1, 4, 0, 100)),
( 10, _gen_gems, (1, 6, 0, 1)),
( 7, _gen_art, (1, 4, 0, 1)),
( 5, _gen_magic, ("Any", 0, 0, 1, 1)),
],
'U3': [
( 30, _gen_coins, ("cp", 2, 6, 0, 100)),
( 50, _gen_coins, ("sp", 1, 10, 0, 100)),
( 25, _gen_coins, ("ep", 1, 8, 0, 100)),
( 50, _gen_coins, ("gp", 1, 6, 0, 100)),
( 4, _gen_coins, ("pp", 1, 4, 0, 100)),
( 15, _gen_gems, (1, 6, 0, 1)),
( 7, _gen_art, (1, 6, 0, 1)),
( 8, _gen_magic, ("Any", 0, 0, 1, 1)),
],
'U45': [
( 20, _gen_coins, ("cp", 3, 6, 0, 100)),
( 50, _gen_coins, ("sp", 2, 6, 0, 100)),
( 25, _gen_coins, ("ep", 1, 10, 0, 100)),
( 50, _gen_coins, ("gp", 2, 6, 0, 100)),
( 8, _gen_coins, ("pp", 1, 4, 0, 100)),
( 20, _gen_gems, (1, 8, 0, 1)),
( 10, _gen_art, (1, 6, 0, 1)),
( 12, _gen_magic, ("Any", 0, 0, 1, 1)),
],
'U67': [
( 15, _gen_coins, ("cp", 4, 6, 0, 100)),
( 50, _gen_coins, ("sp", 3, 6, 0, 100)),
( 25, _gen_coins, ("ep", 1, 12, 0, 100)),
( 70, _gen_coins, ("gp", 2, 8, 0, 100)),
( 15, _gen_coins, ("pp", 1, 4, 0, 100)),
( 30, _gen_gems, (1, 8, 0, 1)),
( 15, _gen_art, (1, 6, 0, 1)),
( 16, _gen_magic, ("Any", 0, 0, 1, 1)),
],
'U8': [
( 10, _gen_coins, ("cp", 5, 6, 0, 100)),
( 50, _gen_coins, ("sp", 5, 6, 0, 100)),
( 25, _gen_coins, ("ep", 2, 8, 0, 100)),
( 75, _gen_coins, ("gp", 4, 6, 0, 100)),
( 30, _gen_coins, ("pp", 1, 4, 0, 100)),
( 40, _gen_gems, (1, 8, 0, 1)),
( 30, _gen_art, (1, 8, 0, 1)),
( 20, _gen_magic, ("Any", 0, 0, 1, 1)),
],
# coinage
'cp': [
(100, _gen_coins, ("cp", 0, 0, 1, 1)),
],
'sp': [
(100, _gen_coins, ("sp", 0, 0, 1, 1)),
],
'ep': [
(100, _gen_coins, ("ep", 0, 0, 1, 1)),
],
'gp': [
(100, _gen_coins, ("gp", 0, 0, 1, 1)),
],
'pp': [
(100, _gen_coins, ("pp", 0, 0, 1, 1)),
],
# magic classes
'MAGIC': [ (100, _gen_magic, ("Any", 0, 0, 1, 1)), ],
'POTION': [ (100, _gen_magic, ("Potion", 0, 0, 1, 1)), ],
'SCROLL': [ (100, _gen_magic, ("Scroll", 0, 0, 1, 1)), ],
'RING': [ (100, _gen_magic, ("Ring", 0, 0, 1, 1)), ],
'WSR': [ (100, _gen_magic, ("WSR", 0, 0, 1, 1)), ],
'MISC': [ (100, _gen_magic, ("Misc", 0, 0, 1, 1)), ],
'ARMOR': [ (100, _gen_magic, ("Armor", 0, 0, 1, 1)), ],
'WEAPON': [ (100, _gen_magic, ("Weapon", 0, 0, 1, 1)), ],
}
_treasure_table['U4'] = _treasure_table['U45']
_treasure_table['U5'] = _treasure_table['U45']
_treasure_table['U6'] = _treasure_table['U67']
_treasure_table['U7'] = _treasure_table['U67']
def Types():
types = _treasure_table.keys()
ones = filter(lambda x: len(x) == 1, types)
mults = filter(lambda x: len(x) > 1, types)
ones.sort()
mults.sort()
return ones + mults
def Treasure(typ):
tr = []
try:
tbl = _treasure_table[string.upper(typ)]
for i in tbl:
if Dice.D(1, 100, 0) <= i[0]:
tr = tr + i[1](i[2])
except:
tr = [ Unknown.Unknown(typ) ]
return tr
def Factory(args):
types = []
tr = []
mult = 1
for i in args:
if type(i) is tuple:
i = Dice.D(*i)
try:
nmult = int(i)
mult = nmult
types.append("%d" % mult)
continue
except:
pass
types.append(i + ",")
for n in range(mult):
tr += Treasure(i)
types = string.join(types, " ")
if types[-1] == ',':
types = types[:-1]
return (types.upper(), combine(tr))
if __name__ == "__main__":
import sys
if len(sys.argv) < 2:
print "Usage: Treasure.py treasuretype [ treasuretype ... ]"
sys.exit(0)
types, tr = Factory(sys.argv[1:])
print "Treasure Type " + string.upper(types)
vtot = 0.0
ocat = ''
qty_len = 1
for t in tr:
qty_len = max(len(str(t.qty)), qty_len)
qty_fmt = "%" + str(qty_len) + "d"
for t in tr:
if t.cat != ocat:
print t.cat
ocat = t.cat
if t.value != 0:
print " ", qty_fmt % t.qty, t.name, t.value, "GP ea.", \
t.value * t.qty, "GP total"
else:
print " ", qty_fmt % t.qty, t.name
for i in t.desc:
print " ", i
vtot = vtot + (t.qty * t.value)
print "----- Total Value", vtot, "GP\n"
# end of script.
| en | 0.59987 | # Basic Fantasy RPG Dungeoneer Suite # Copyright 2007-2012 <NAME> # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # Redistributions of source code must retain the above copyright # notice, self list of conditions and the following disclaimer. # # Redistributions in binary form must reproduce the above copyright # notice, self list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # Neither the name of the author nor the names of any contributors # may be used to endorse or promote products derived from self software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### # Treasure.py -- generate treasures for Basic Fantasy RPG ############################################################################### # lair treasure # personal treasure # coinage # magic classes # end of script. | 1.550305 | 2 |
covid19/COVID19/code/controller/main.py | zhanqingheng/COVID-19 | 16 | 10470 | from flask import Flask, current_app
from flask import render_template
from flask import jsonify
from jieba.analyse import extract_tags
import string
from DB import chinaSQL
from DB import worldSQL
app = Flask(__name__, template_folder='../../web', static_folder='../../static')
@app.route('/', methods=["get", "post"])
def hello_world():
return render_template("china.html")
@app.route('/china', methods=["get", "post"])
def china():
return render_template("china.html")
@app.route('/world', methods=["get", "post"])
def world():
return render_template("world.html")
@app.route('/favicon.ico')
def favicon():
return current_app.send_static_file('image/favicon-32x32-sun.ico')
@app.route("/time")
def time():
data = chinaSQL.time()
return str(data[0])
@app.route("/chinaEightNumber")
def chinaEightNumber():
data = chinaSQL.chinaEightNumber()
return jsonify({"confirmTotal": data[0],
"healTotal": data[1],
"deadTotal": data[2],
"nowConfirmTotal": data[3],
"suspectTotal": data[4],
"nowSevereTotal": data[5],
"importedCaseTotal": data[6],
"noInfectTotal": data[7],
"confirmAdd": data[8],
"healAdd": data[9],
"deadAdd": data[10],
"nowConfirmAdd": data[11],
"suspectAdd": data[12],
"nowSevereAdd": data[13],
"importedCaseAdd": data[14],
"noInfectAdd": data[15]
})
@app.route('/chinaMap', methods=['GET'])
def chinaMap():
data = chinaSQL.chinaMap()
confirmToday, nowConfirmTotal, confirmTotal, healTotal, deadTotal = [], [], [], [], []
for a, b, c, d, e, f in data:
confirmToday.append({"name": a, "value": b})
nowConfirmTotal.append({"name": a, "value": c})
confirmTotal.append({"name": a, "value": d})
healTotal.append({"name": a, "value": e})
deadTotal.append({"name": a, "value": f})
return jsonify({"confirmToday": confirmToday, "nowConfirmTotal": nowConfirmTotal,
"confirmTotal": confirmTotal, "healTotal": healTotal, "deadTotal": deadTotal})
@app.route('/chinaProvinceMap', methods=['GET'])
def chinaProvinceMap():
data = chinaSQL.chinaProvinceMap()
confirmToday, nowConfirmTotal, confirmTotal, healTotal, deadTotal = [], [], [], [], []
for a, b, c, d, e, f in data:
confirmToday.append({"name": a + "市", "value": b})
nowConfirmTotal.append({"name": a + "市", "value": c})
confirmTotal.append({"name": a + "市", "value": d})
healTotal.append({"name": a + "市", "value": e})
deadTotal.append({"name": a + "市", "value": f})
return jsonify({"confirmToday": confirmToday, "nowConfirmTotal": nowConfirmTotal,
"confirmTotal": confirmTotal, "healTotal": healTotal, "deadTotal": deadTotal})
@app.route("/nationalTotal")
def nationalTotal():
data = chinaSQL.nationalTotal()
day, \
confirmChinaDayList, \
healChinaDayList, \
deadChinaDayList, \
importedCaseChinaDayList = [], [], [], [], []
for a, b, c, d, e in data:
day.append(a.strftime("%m-%d"))
confirmChinaDayList.append(b)
healChinaDayList.append(c)
deadChinaDayList.append(d)
importedCaseChinaDayList.append(e)
return jsonify({"day": day,
"confirmChinaDayList": confirmChinaDayList,
"healChinaDayList": healChinaDayList,
"deadChinaDayList": deadChinaDayList,
"importedCaseChinaDayList": importedCaseChinaDayList
})
@app.route("/dailyAdditionsNationwide")
def dailyAdditionsNationwide():
data = chinaSQL.dailyAdditionsNationwide()
day, \
confirmChinaDayAddList, \
healChinaDayAddList, \
deadChinaDayAddList, \
importedCaseChinaDayAddList = [], [], [], [], []
for a, b, c, d, e in data[7:]:
day.append(a.strftime("%m-%d"))
confirmChinaDayAddList.append(b)
healChinaDayAddList.append(c)
deadChinaDayAddList.append(d)
importedCaseChinaDayAddList.append(e)
return jsonify({"day": day,
"confirmChinaDayAddList": confirmChinaDayAddList,
"healChinaDayAddList": healChinaDayAddList,
"deadChinaDayAddList": deadChinaDayAddList,
"importedCaseChinaDayAddList": importedCaseChinaDayAddList
})
@app.route("/dailyCasesNationwide")
def dailyCasesNationwide():
data = chinaSQL.dailyCasesNationwide()
day, \
suspectChinaDayList, \
noInfectChinaDayList, \
nowConfirmChinaDayList, \
nowSevereChinaDayList = [], [], [], [], []
for a, b, c, d, e in data[7:]:
day.append(a.strftime("%m-%d"))
suspectChinaDayList.append(b)
noInfectChinaDayList.append(c)
nowConfirmChinaDayList.append(d)
nowSevereChinaDayList.append(e)
return jsonify({"day": day,
"suspectChinaDayList": suspectChinaDayList,
"noInfectChinaDayList": noInfectChinaDayList,
"nowConfirmChinaDayList": nowConfirmChinaDayList,
"nowSevereChinaDayList": nowSevereChinaDayList
})
@app.route("/nationalCumulativeCureMortalityRate")
def nationalCumulativeCureMortalityRate():
data = chinaSQL.nationalCumulativeCureMortalityRate()
day, \
healRateChinaDayList, \
deadRateChinaDayList = [], [], []
for a, b, c in data[7:]:
day.append(a.strftime("%m-%d"))
healRateChinaDayList.append(b)
deadRateChinaDayList.append(c)
return jsonify({"day": day,
"healRateChinaDayList": healRateChinaDayList,
"deadRateChinaDayList": deadRateChinaDayList
})
@app.route("/detailedDataByProvince")
def detailedDataByProvince():
data = chinaSQL.detailedDataByProvince()
provinceName, \
confirmTotal, \
healTotal, \
deadTotal, \
healRateTotal, \
deadRateTotal = [], [], [], [], [], []
for a, b, c, d, e, f in data:
provinceName.append(a)
confirmTotal.append(b)
healTotal.append(c)
deadTotal.append(d)
healRateTotal.append(e)
deadRateTotal.append(f)
return jsonify({"provinceName": provinceName,
"confirmTotal": confirmTotal,
"healTotal": healTotal,
"deadTotal": deadTotal,
"healRateTotal": healRateTotal,
"deadRateTotal": deadRateTotal
})
@app.route("/cumulativeNumberOfConfirmedCasesInAllProvinces")
def cumulativeNumberOfConfirmedCasesInAllProvinces():
data = chinaSQL.cumulativeNumberOfConfirmedCasesInAllProvinces()
provincedetails = []
for provinceName, confirmTotal in data:
provincedetails.append({"name": provinceName, "value": confirmTotal})
return jsonify({"data": provincedetails})
@app.route("/currentConfirmedDataInAllProvinces")
def currentConfirmedDataInAllProvinces():
data = chinaSQL.currentConfirmedDataInAllProvinces()
provinceName, \
nowConfirmTotal, \
confirmToday, \
suspectTotal = [], [], [], []
for a, b, c, d in data:
provinceName.append(a)
nowConfirmTotal.append(b)
confirmToday.append(c)
suspectTotal.append(d)
return jsonify({"provinceName": provinceName,
"nowConfirmTotal": nowConfirmTotal,
"confirmToday": confirmToday,
"suspectTotal": suspectTotal
})
@app.route("/existingDiagnosticClassificationInChina")
def existingDiagnosticClassificationInChina():
data = chinaSQL.existingDiagnosticClassificationInChina()
nowconfirmstatis = []
nowconfirmstatis.append({"name": '港澳台现存确诊', "value": data[0][0]})
nowconfirmstatis.append({"name": '境外输入现存确诊', "value": data[0][1]})
nowconfirmstatis.append({"name": '31省本土现有确诊', "value": data[0][2]})
return jsonify({"data": nowconfirmstatis})
@app.route("/totalNumberOfOverseasImportsFromTop10Provinces")
def totalNumberOfOverseasImportsFromTop10Provinces():
data = chinaSQL.totalNumberOfOverseasImportsFromTop10Provinces()
importstatis = []
for province, importedCase in data:
importstatis.append({"name": province, "value": importedCase})
return jsonify({"data": importstatis})
@app.route("/eachProvinceComparesYesterdayData")
def eachProvinceComparesYesterdayData():
data = chinaSQL.eachProvinceComparesYesterdayData()
province, \
nowConfirm, \
confirmAdd, \
heal, \
dead, \
zero = [], [], [], [], [], []
for a, b, c, d, e, f in data:
province.append(a)
nowConfirm.append(b)
confirmAdd.append(c)
heal.append(d)
dead.append(e)
zero.append(f)
return jsonify({"province": province,
"nowConfirm": nowConfirm,
"confirmAdd": confirmAdd,
"heal": heal,
"dead": dead,
"zero": zero
})
@app.route("/hubeiNonHubeiNationalCumulativeData")
def hubeiNonHubeiNationalCumulativeData():
data = chinaSQL.hubeiNonHubeiNationalCumulativeData()
day, \
hubeiNowConfirm, \
hubeiHeal, \
hubeiDead, \
notHubeiNowConfirm, \
notHubeiHeal, \
notHubeiDead, \
countryNowConfirm, \
countryHeal, \
countryDead = [], [], [], [], [], [], [], [], [], []
for a, b, c, d, e, f, g, h, i, j in data:
day.append(a.strftime("%m-%d"))
hubeiNowConfirm.append(b)
hubeiHeal.append(c)
hubeiDead.append(d)
notHubeiNowConfirm.append(e)
notHubeiHeal.append(f)
notHubeiDead.append(g)
countryNowConfirm.append(h)
countryHeal.append(i)
countryDead.append(j)
return jsonify({"day": day,
"hubeiNowConfirm": hubeiNowConfirm,
"hubeiHeal": hubeiHeal,
"hubeiDead": hubeiDead,
"notHubeiNowConfirm": notHubeiNowConfirm,
"notHubeiHeal": notHubeiHeal,
"notHubeiDead": notHubeiDead,
"countryNowConfirm": countryNowConfirm,
"countryHeal": countryHeal,
"countryDead": countryDead
})
@app.route("/hubeiNonHubeiNationalCureMortalityRate")
def hubeiNonHubeiNationalCureMortalityRate():
data = chinaSQL.hubeiNonHubeiNationalCureMortalityRate()
day, \
hubeiHealRate, \
hubeiDeadRate, \
notHubeiHealRate, \
notHubeiDeadRate, \
countryHealRate, \
countryDeadRate = [], [], [], [], [], [], []
for a, b, c, d, e, f, g in data:
day.append(a.strftime("%m-%d"))
hubeiHealRate.append(b)
hubeiDeadRate.append(c)
notHubeiHealRate.append(d)
notHubeiDeadRate.append(e)
countryHealRate.append(f)
countryDeadRate.append(g)
return jsonify({"day": day,
"hubeiHealRate": hubeiHealRate,
"hubeiDeadRate": hubeiDeadRate,
"notHubeiHealRate": notHubeiHealRate,
"notHubeiDeadRate": notHubeiDeadRate,
"countryHealRate": countryHealRate,
"countryDeadRate": countryDeadRate
})
@app.route("/hubeiNonHubeiNationalDailyNew")
def hubeiNonHubeiNationalDailyNew():
data = chinaSQL.hubeiNonHubeiNationalDailyNew()
day, \
hubei, \
notHubei, \
country = [], [], [], []
for a, b, c, d in data[7:]:
day.append(a.strftime("%m-%d"))
hubei.append(b)
notHubei.append(c)
country.append(d)
return jsonify({"day": day,
"hubei": hubei,
"notHubei": notHubei,
"country": country
})
@app.route("/wuhanNotWuhanNotHubeiNewlyConfirmed")
def wuhanNotWuhanNotHubeiNewlyConfirmed():
data = chinaSQL.wuhanNotWuhanNotHubeiNewlyConfirmed()
day, \
wuhan, \
notWuhan, \
notHubei = [], [], [], []
for a, b, c, d in data:
day.append(a.strftime("%m-%d"))
wuhan.append(b)
notWuhan.append(c)
notHubei.append(d)
return jsonify({"day": day,
"wuhan": wuhan,
"notWuhan": notWuhan,
"notHubei": notHubei
})
@app.route("/totalConfirmedTop20UrbanAreas")
def totalConfirmedTop20UrbanAreas():
data = chinaSQL.totalConfirmedTop20UrbanAreas()
cityName, \
deadRateTotal, \
healRateTotal = [], [], []
for a, b, c in data:
cityName.append(a)
deadRateTotal.append(b)
healRateTotal.append(c)
return jsonify({"cityName": cityName,
"deadRateTotal": deadRateTotal,
"healRateTotal": healRateTotal
})
@app.route("/existingConfirmedTop20UrbanAreas")
def existingConfirmedTop20UrbanAreas():
data = chinaSQL.existingConfirmedTop20UrbanAreas()
cityName, \
nowConfirmTotal, \
confirmToday, \
suspectTotal = [], [], [], []
for a, b, c, d in data:
cityName.append(a)
nowConfirmTotal.append(b)
confirmToday.append(c)
suspectTotal.append(d)
return jsonify({"cityName": cityName,
"nowConfirmTotal": nowConfirmTotal,
"confirmToday": confirmToday,
"suspectTotal": suspectTotal
})
@app.route("/urbanDataOfHubeiProvince")
def urbanDataOfHubeiProvince():
data = chinaSQL.urbanDataOfHubeiProvince()
cityName, \
confirmTotal, \
healTotal, \
deadTotal = [], [], [], []
for a, b, c, d in data:
cityName.append(a)
confirmTotal.append(b)
healTotal.append(c)
deadTotal.append(d)
return jsonify({"cityName": cityName,
"confirmTotal": confirmTotal,
"healTotal": healTotal,
"deadTotal": deadTotal
})
@app.route("/accumulativeDataExceptHubeiProvince")
def accumulativeDataExceptHubeiProvince():
data = chinaSQL.accumulativeDataExceptHubeiProvince()
cityName, \
confirmTotal, \
healTotal, \
deadTotal = [], [], [], []
for a, b, c, d in data:
cityName.append(a)
confirmTotal.append(b)
healTotal.append(c)
deadTotal.append(d)
return jsonify({"cityName": cityName,
"confirmTotal": confirmTotal,
"healTotal": healTotal,
"deadTotal": deadTotal
})
@app.route("/provincesWithFatalCasesNationwide")
def provincesWithFatalCasesNationwide():
data = chinaSQL.provincesWithFatalCasesNationwide()
provincedetails = []
provincedetails.append({"name": "无死亡病例省份数量", "value": data[0][0]})
provincedetails.append({"name": "有死亡病例省份数量", "value": data[0][1]})
return jsonify({"data": provincedetails})
@app.route("/numberOfDeathsInCities")
def numberOfDeathsInCities():
data = chinaSQL.numberOfDeathsInCities()
dataCityCount = []
dataCityCount.append({"name": "无死亡病例城市数量", "value": data[0][0]})
dataCityCount.append({"name": "有死亡病例城市数量", "value": data[0][1]})
return jsonify({"data": dataCityCount})
@app.route("/outbreakOut")
def outbreakOut():
data = chinaSQL.outbreakOut()
d = []
for i in data:
k = i[0].rstrip(string.digits)
v = i[0][len(k):]
ks = extract_tags(k)
for j in ks:
if not j.isdigit():
d.append({"name": j, "value": v})
return jsonify({"kws": d})
@app.route("/worldFourNumber")
def worldFourNumber():
data = worldSQL.worldFourNumber()
return jsonify({"nowConfirm": data[0],
"confirm": data[1],
"heal": data[2],
"dead": data[3],
"nowConfirmAdd": data[4],
"confirmAdd": data[5],
"healAdd": data[6],
"deadAdd": data[7]
})
@app.route('/worldMapNoChina', methods=['GET'])
def worldMapNoChina():
data = worldSQL.worldMapNoChina()
nowConfirm, confirm, heal, dead = [], [], [], []
for a, b, c, d, e in data:
nowConfirm.append({"name": a, "value": b})
confirm.append({"name": a, "value": c})
heal.append({"name": a, "value": d})
dead.append({"name": a, "value": e})
data1 = worldSQL.worldMapChina()
nowConfirm.append({"name": "中国", "value": data1[0][0]})
confirm.append({"name": "中国", "value": data1[0][1]})
heal.append({"name": "中国", "value": data1[0][2]})
dead.append({"name": "中国", "value": data1[0][3]})
return jsonify({"nowConfirm": nowConfirm,
"confirm": confirm,
"heal": heal,
"dead": dead
})
@app.route("/globalCumulativeTrend")
def globalCumulativeTrend():
data = worldSQL.globalCumulativeTrend()
day, \
confirm, \
heal, \
dead, \
newAddConfirm = [], [], [], [], []
for a, b, c, d, e in data:
day.append(a.strftime("%m-%d"))
confirm.append(b)
heal.append(c)
dead.append(d)
newAddConfirm.append(e)
return jsonify({"day": day,
"confirm": confirm,
"heal": heal,
"dead": dead,
"newAddConfirm": newAddConfirm
})
@app.route("/globalCumulativeCureMortality")
def globalCumulativeCureMortality():
data = worldSQL.globalCumulativeCureMortality()
day, \
healRate, \
deadRate = [], [], []
for a, b, c in data:
day.append(a.strftime("%m-%d"))
healRate.append(b)
deadRate.append(c)
return jsonify({"day": day,
"healRate": healRate,
"deadRate": deadRate
})
@app.route("/foreignCumulativeDiagnosisTop10Countries")
def foreignCumulativeDiagnosisTop10Countries():
data = worldSQL.foreignCumulativeDiagnosisTop10Countries()
name, \
nowConfirm, \
confirm, \
heal, \
dead = [], [], [], [], []
for a, b, c, d, e in data:
name.append(a)
nowConfirm.append(b)
confirm.append(c)
heal.append(d)
dead.append(e)
return jsonify({"name": name,
"nowConfirm": nowConfirm,
"confirm": confirm,
"heal": heal,
"dead": dead
})
@app.route("/theTop10CountriesGrewFastestInSevenDays")
def theTop10CountriesGrewFastestInSevenDays():
data = worldSQL.theTop10CountriesGrewFastestInSevenDays()
nation, \
day7, \
day, \
rate = [], [], [], []
for a, b, c, d in data:
nation.append(a)
day7.append(b)
day.append(c)
rate.append(d)
return jsonify({"nation": nation,
"day7": day7,
"day0": day,
"rate": rate
})
@app.route("/overseasCountriesWithMoreThan10000ConfirmedCases")
def overseasCountriesWithMoreThan10000ConfirmedCases():
data = worldSQL.overseasCountriesWithMoreThan10000ConfirmedCases()
foreignlist = []
for name, confirm in data:
foreignlist.append({"name": name, "value": confirm})
return jsonify({"data": foreignlist})
@app.route("/overseasCountriesWithMoreThan10000HaveBeenConfirmedCases")
def overseasCountriesWithMoreThan10000HaveBeenConfirmedCases():
data = worldSQL.overseasCountriesWithMoreThan10000HaveBeenConfirmedCases()
foreignlist = []
for name, nowConfirm in data:
foreignlist.append({"name": name, "value": nowConfirm})
return jsonify({"data": foreignlist})
@app.route("/newCasesInTheTop10CountriesWithin24Hours")
def newCasesInTheTop10CountriesWithin24Hours():
data = worldSQL.newCasesInTheTop10CountriesWithin24Hours()
nationAddConfirm = []
for nation, addConfirm in data:
nationAddConfirm.append({"name": nation, "value": addConfirm})
return jsonify({"data": nationAddConfirm})
@app.route("/theNumberOfForeignCountriesWithConfirmedCases")
def theNumberOfForeignCountriesWithConfirmedCases():
data = worldSQL.theNumberOfForeignCountriesWithConfirmedCases()
foreignlist = []
for continent, count in data:
foreignlist.append({"name": continent, "value": count})
return jsonify({"data": foreignlist})
if __name__ == '__main__':
app.run()
| from flask import Flask, current_app
from flask import render_template
from flask import jsonify
from jieba.analyse import extract_tags
import string
from DB import chinaSQL
from DB import worldSQL
app = Flask(__name__, template_folder='../../web', static_folder='../../static')
@app.route('/', methods=["get", "post"])
def hello_world():
return render_template("china.html")
@app.route('/china', methods=["get", "post"])
def china():
return render_template("china.html")
@app.route('/world', methods=["get", "post"])
def world():
return render_template("world.html")
@app.route('/favicon.ico')
def favicon():
return current_app.send_static_file('image/favicon-32x32-sun.ico')
@app.route("/time")
def time():
data = chinaSQL.time()
return str(data[0])
@app.route("/chinaEightNumber")
def chinaEightNumber():
data = chinaSQL.chinaEightNumber()
return jsonify({"confirmTotal": data[0],
"healTotal": data[1],
"deadTotal": data[2],
"nowConfirmTotal": data[3],
"suspectTotal": data[4],
"nowSevereTotal": data[5],
"importedCaseTotal": data[6],
"noInfectTotal": data[7],
"confirmAdd": data[8],
"healAdd": data[9],
"deadAdd": data[10],
"nowConfirmAdd": data[11],
"suspectAdd": data[12],
"nowSevereAdd": data[13],
"importedCaseAdd": data[14],
"noInfectAdd": data[15]
})
@app.route('/chinaMap', methods=['GET'])
def chinaMap():
data = chinaSQL.chinaMap()
confirmToday, nowConfirmTotal, confirmTotal, healTotal, deadTotal = [], [], [], [], []
for a, b, c, d, e, f in data:
confirmToday.append({"name": a, "value": b})
nowConfirmTotal.append({"name": a, "value": c})
confirmTotal.append({"name": a, "value": d})
healTotal.append({"name": a, "value": e})
deadTotal.append({"name": a, "value": f})
return jsonify({"confirmToday": confirmToday, "nowConfirmTotal": nowConfirmTotal,
"confirmTotal": confirmTotal, "healTotal": healTotal, "deadTotal": deadTotal})
@app.route('/chinaProvinceMap', methods=['GET'])
def chinaProvinceMap():
data = chinaSQL.chinaProvinceMap()
confirmToday, nowConfirmTotal, confirmTotal, healTotal, deadTotal = [], [], [], [], []
for a, b, c, d, e, f in data:
confirmToday.append({"name": a + "市", "value": b})
nowConfirmTotal.append({"name": a + "市", "value": c})
confirmTotal.append({"name": a + "市", "value": d})
healTotal.append({"name": a + "市", "value": e})
deadTotal.append({"name": a + "市", "value": f})
return jsonify({"confirmToday": confirmToday, "nowConfirmTotal": nowConfirmTotal,
"confirmTotal": confirmTotal, "healTotal": healTotal, "deadTotal": deadTotal})
@app.route("/nationalTotal")
def nationalTotal():
data = chinaSQL.nationalTotal()
day, \
confirmChinaDayList, \
healChinaDayList, \
deadChinaDayList, \
importedCaseChinaDayList = [], [], [], [], []
for a, b, c, d, e in data:
day.append(a.strftime("%m-%d"))
confirmChinaDayList.append(b)
healChinaDayList.append(c)
deadChinaDayList.append(d)
importedCaseChinaDayList.append(e)
return jsonify({"day": day,
"confirmChinaDayList": confirmChinaDayList,
"healChinaDayList": healChinaDayList,
"deadChinaDayList": deadChinaDayList,
"importedCaseChinaDayList": importedCaseChinaDayList
})
@app.route("/dailyAdditionsNationwide")
def dailyAdditionsNationwide():
data = chinaSQL.dailyAdditionsNationwide()
day, \
confirmChinaDayAddList, \
healChinaDayAddList, \
deadChinaDayAddList, \
importedCaseChinaDayAddList = [], [], [], [], []
for a, b, c, d, e in data[7:]:
day.append(a.strftime("%m-%d"))
confirmChinaDayAddList.append(b)
healChinaDayAddList.append(c)
deadChinaDayAddList.append(d)
importedCaseChinaDayAddList.append(e)
return jsonify({"day": day,
"confirmChinaDayAddList": confirmChinaDayAddList,
"healChinaDayAddList": healChinaDayAddList,
"deadChinaDayAddList": deadChinaDayAddList,
"importedCaseChinaDayAddList": importedCaseChinaDayAddList
})
@app.route("/dailyCasesNationwide")
def dailyCasesNationwide():
data = chinaSQL.dailyCasesNationwide()
day, \
suspectChinaDayList, \
noInfectChinaDayList, \
nowConfirmChinaDayList, \
nowSevereChinaDayList = [], [], [], [], []
for a, b, c, d, e in data[7:]:
day.append(a.strftime("%m-%d"))
suspectChinaDayList.append(b)
noInfectChinaDayList.append(c)
nowConfirmChinaDayList.append(d)
nowSevereChinaDayList.append(e)
return jsonify({"day": day,
"suspectChinaDayList": suspectChinaDayList,
"noInfectChinaDayList": noInfectChinaDayList,
"nowConfirmChinaDayList": nowConfirmChinaDayList,
"nowSevereChinaDayList": nowSevereChinaDayList
})
@app.route("/nationalCumulativeCureMortalityRate")
def nationalCumulativeCureMortalityRate():
data = chinaSQL.nationalCumulativeCureMortalityRate()
day, \
healRateChinaDayList, \
deadRateChinaDayList = [], [], []
for a, b, c in data[7:]:
day.append(a.strftime("%m-%d"))
healRateChinaDayList.append(b)
deadRateChinaDayList.append(c)
return jsonify({"day": day,
"healRateChinaDayList": healRateChinaDayList,
"deadRateChinaDayList": deadRateChinaDayList
})
@app.route("/detailedDataByProvince")
def detailedDataByProvince():
data = chinaSQL.detailedDataByProvince()
provinceName, \
confirmTotal, \
healTotal, \
deadTotal, \
healRateTotal, \
deadRateTotal = [], [], [], [], [], []
for a, b, c, d, e, f in data:
provinceName.append(a)
confirmTotal.append(b)
healTotal.append(c)
deadTotal.append(d)
healRateTotal.append(e)
deadRateTotal.append(f)
return jsonify({"provinceName": provinceName,
"confirmTotal": confirmTotal,
"healTotal": healTotal,
"deadTotal": deadTotal,
"healRateTotal": healRateTotal,
"deadRateTotal": deadRateTotal
})
@app.route("/cumulativeNumberOfConfirmedCasesInAllProvinces")
def cumulativeNumberOfConfirmedCasesInAllProvinces():
data = chinaSQL.cumulativeNumberOfConfirmedCasesInAllProvinces()
provincedetails = []
for provinceName, confirmTotal in data:
provincedetails.append({"name": provinceName, "value": confirmTotal})
return jsonify({"data": provincedetails})
@app.route("/currentConfirmedDataInAllProvinces")
def currentConfirmedDataInAllProvinces():
data = chinaSQL.currentConfirmedDataInAllProvinces()
provinceName, \
nowConfirmTotal, \
confirmToday, \
suspectTotal = [], [], [], []
for a, b, c, d in data:
provinceName.append(a)
nowConfirmTotal.append(b)
confirmToday.append(c)
suspectTotal.append(d)
return jsonify({"provinceName": provinceName,
"nowConfirmTotal": nowConfirmTotal,
"confirmToday": confirmToday,
"suspectTotal": suspectTotal
})
@app.route("/existingDiagnosticClassificationInChina")
def existingDiagnosticClassificationInChina():
data = chinaSQL.existingDiagnosticClassificationInChina()
nowconfirmstatis = []
nowconfirmstatis.append({"name": '港澳台现存确诊', "value": data[0][0]})
nowconfirmstatis.append({"name": '境外输入现存确诊', "value": data[0][1]})
nowconfirmstatis.append({"name": '31省本土现有确诊', "value": data[0][2]})
return jsonify({"data": nowconfirmstatis})
@app.route("/totalNumberOfOverseasImportsFromTop10Provinces")
def totalNumberOfOverseasImportsFromTop10Provinces():
data = chinaSQL.totalNumberOfOverseasImportsFromTop10Provinces()
importstatis = []
for province, importedCase in data:
importstatis.append({"name": province, "value": importedCase})
return jsonify({"data": importstatis})
@app.route("/eachProvinceComparesYesterdayData")
def eachProvinceComparesYesterdayData():
data = chinaSQL.eachProvinceComparesYesterdayData()
province, \
nowConfirm, \
confirmAdd, \
heal, \
dead, \
zero = [], [], [], [], [], []
for a, b, c, d, e, f in data:
province.append(a)
nowConfirm.append(b)
confirmAdd.append(c)
heal.append(d)
dead.append(e)
zero.append(f)
return jsonify({"province": province,
"nowConfirm": nowConfirm,
"confirmAdd": confirmAdd,
"heal": heal,
"dead": dead,
"zero": zero
})
@app.route("/hubeiNonHubeiNationalCumulativeData")
def hubeiNonHubeiNationalCumulativeData():
data = chinaSQL.hubeiNonHubeiNationalCumulativeData()
day, \
hubeiNowConfirm, \
hubeiHeal, \
hubeiDead, \
notHubeiNowConfirm, \
notHubeiHeal, \
notHubeiDead, \
countryNowConfirm, \
countryHeal, \
countryDead = [], [], [], [], [], [], [], [], [], []
for a, b, c, d, e, f, g, h, i, j in data:
day.append(a.strftime("%m-%d"))
hubeiNowConfirm.append(b)
hubeiHeal.append(c)
hubeiDead.append(d)
notHubeiNowConfirm.append(e)
notHubeiHeal.append(f)
notHubeiDead.append(g)
countryNowConfirm.append(h)
countryHeal.append(i)
countryDead.append(j)
return jsonify({"day": day,
"hubeiNowConfirm": hubeiNowConfirm,
"hubeiHeal": hubeiHeal,
"hubeiDead": hubeiDead,
"notHubeiNowConfirm": notHubeiNowConfirm,
"notHubeiHeal": notHubeiHeal,
"notHubeiDead": notHubeiDead,
"countryNowConfirm": countryNowConfirm,
"countryHeal": countryHeal,
"countryDead": countryDead
})
@app.route("/hubeiNonHubeiNationalCureMortalityRate")
def hubeiNonHubeiNationalCureMortalityRate():
data = chinaSQL.hubeiNonHubeiNationalCureMortalityRate()
day, \
hubeiHealRate, \
hubeiDeadRate, \
notHubeiHealRate, \
notHubeiDeadRate, \
countryHealRate, \
countryDeadRate = [], [], [], [], [], [], []
for a, b, c, d, e, f, g in data:
day.append(a.strftime("%m-%d"))
hubeiHealRate.append(b)
hubeiDeadRate.append(c)
notHubeiHealRate.append(d)
notHubeiDeadRate.append(e)
countryHealRate.append(f)
countryDeadRate.append(g)
return jsonify({"day": day,
"hubeiHealRate": hubeiHealRate,
"hubeiDeadRate": hubeiDeadRate,
"notHubeiHealRate": notHubeiHealRate,
"notHubeiDeadRate": notHubeiDeadRate,
"countryHealRate": countryHealRate,
"countryDeadRate": countryDeadRate
})
@app.route("/hubeiNonHubeiNationalDailyNew")
def hubeiNonHubeiNationalDailyNew():
data = chinaSQL.hubeiNonHubeiNationalDailyNew()
day, \
hubei, \
notHubei, \
country = [], [], [], []
for a, b, c, d in data[7:]:
day.append(a.strftime("%m-%d"))
hubei.append(b)
notHubei.append(c)
country.append(d)
return jsonify({"day": day,
"hubei": hubei,
"notHubei": notHubei,
"country": country
})
@app.route("/wuhanNotWuhanNotHubeiNewlyConfirmed")
def wuhanNotWuhanNotHubeiNewlyConfirmed():
data = chinaSQL.wuhanNotWuhanNotHubeiNewlyConfirmed()
day, \
wuhan, \
notWuhan, \
notHubei = [], [], [], []
for a, b, c, d in data:
day.append(a.strftime("%m-%d"))
wuhan.append(b)
notWuhan.append(c)
notHubei.append(d)
return jsonify({"day": day,
"wuhan": wuhan,
"notWuhan": notWuhan,
"notHubei": notHubei
})
@app.route("/totalConfirmedTop20UrbanAreas")
def totalConfirmedTop20UrbanAreas():
data = chinaSQL.totalConfirmedTop20UrbanAreas()
cityName, \
deadRateTotal, \
healRateTotal = [], [], []
for a, b, c in data:
cityName.append(a)
deadRateTotal.append(b)
healRateTotal.append(c)
return jsonify({"cityName": cityName,
"deadRateTotal": deadRateTotal,
"healRateTotal": healRateTotal
})
@app.route("/existingConfirmedTop20UrbanAreas")
def existingConfirmedTop20UrbanAreas():
data = chinaSQL.existingConfirmedTop20UrbanAreas()
cityName, \
nowConfirmTotal, \
confirmToday, \
suspectTotal = [], [], [], []
for a, b, c, d in data:
cityName.append(a)
nowConfirmTotal.append(b)
confirmToday.append(c)
suspectTotal.append(d)
return jsonify({"cityName": cityName,
"nowConfirmTotal": nowConfirmTotal,
"confirmToday": confirmToday,
"suspectTotal": suspectTotal
})
@app.route("/urbanDataOfHubeiProvince")
def urbanDataOfHubeiProvince():
data = chinaSQL.urbanDataOfHubeiProvince()
cityName, \
confirmTotal, \
healTotal, \
deadTotal = [], [], [], []
for a, b, c, d in data:
cityName.append(a)
confirmTotal.append(b)
healTotal.append(c)
deadTotal.append(d)
return jsonify({"cityName": cityName,
"confirmTotal": confirmTotal,
"healTotal": healTotal,
"deadTotal": deadTotal
})
@app.route("/accumulativeDataExceptHubeiProvince")
def accumulativeDataExceptHubeiProvince():
data = chinaSQL.accumulativeDataExceptHubeiProvince()
cityName, \
confirmTotal, \
healTotal, \
deadTotal = [], [], [], []
for a, b, c, d in data:
cityName.append(a)
confirmTotal.append(b)
healTotal.append(c)
deadTotal.append(d)
return jsonify({"cityName": cityName,
"confirmTotal": confirmTotal,
"healTotal": healTotal,
"deadTotal": deadTotal
})
@app.route("/provincesWithFatalCasesNationwide")
def provincesWithFatalCasesNationwide():
data = chinaSQL.provincesWithFatalCasesNationwide()
provincedetails = []
provincedetails.append({"name": "无死亡病例省份数量", "value": data[0][0]})
provincedetails.append({"name": "有死亡病例省份数量", "value": data[0][1]})
return jsonify({"data": provincedetails})
@app.route("/numberOfDeathsInCities")
def numberOfDeathsInCities():
data = chinaSQL.numberOfDeathsInCities()
dataCityCount = []
dataCityCount.append({"name": "无死亡病例城市数量", "value": data[0][0]})
dataCityCount.append({"name": "有死亡病例城市数量", "value": data[0][1]})
return jsonify({"data": dataCityCount})
@app.route("/outbreakOut")
def outbreakOut():
data = chinaSQL.outbreakOut()
d = []
for i in data:
k = i[0].rstrip(string.digits)
v = i[0][len(k):]
ks = extract_tags(k)
for j in ks:
if not j.isdigit():
d.append({"name": j, "value": v})
return jsonify({"kws": d})
@app.route("/worldFourNumber")
def worldFourNumber():
data = worldSQL.worldFourNumber()
return jsonify({"nowConfirm": data[0],
"confirm": data[1],
"heal": data[2],
"dead": data[3],
"nowConfirmAdd": data[4],
"confirmAdd": data[5],
"healAdd": data[6],
"deadAdd": data[7]
})
@app.route('/worldMapNoChina', methods=['GET'])
def worldMapNoChina():
data = worldSQL.worldMapNoChina()
nowConfirm, confirm, heal, dead = [], [], [], []
for a, b, c, d, e in data:
nowConfirm.append({"name": a, "value": b})
confirm.append({"name": a, "value": c})
heal.append({"name": a, "value": d})
dead.append({"name": a, "value": e})
data1 = worldSQL.worldMapChina()
nowConfirm.append({"name": "中国", "value": data1[0][0]})
confirm.append({"name": "中国", "value": data1[0][1]})
heal.append({"name": "中国", "value": data1[0][2]})
dead.append({"name": "中国", "value": data1[0][3]})
return jsonify({"nowConfirm": nowConfirm,
"confirm": confirm,
"heal": heal,
"dead": dead
})
@app.route("/globalCumulativeTrend")
def globalCumulativeTrend():
data = worldSQL.globalCumulativeTrend()
day, \
confirm, \
heal, \
dead, \
newAddConfirm = [], [], [], [], []
for a, b, c, d, e in data:
day.append(a.strftime("%m-%d"))
confirm.append(b)
heal.append(c)
dead.append(d)
newAddConfirm.append(e)
return jsonify({"day": day,
"confirm": confirm,
"heal": heal,
"dead": dead,
"newAddConfirm": newAddConfirm
})
@app.route("/globalCumulativeCureMortality")
def globalCumulativeCureMortality():
data = worldSQL.globalCumulativeCureMortality()
day, \
healRate, \
deadRate = [], [], []
for a, b, c in data:
day.append(a.strftime("%m-%d"))
healRate.append(b)
deadRate.append(c)
return jsonify({"day": day,
"healRate": healRate,
"deadRate": deadRate
})
@app.route("/foreignCumulativeDiagnosisTop10Countries")
def foreignCumulativeDiagnosisTop10Countries():
data = worldSQL.foreignCumulativeDiagnosisTop10Countries()
name, \
nowConfirm, \
confirm, \
heal, \
dead = [], [], [], [], []
for a, b, c, d, e in data:
name.append(a)
nowConfirm.append(b)
confirm.append(c)
heal.append(d)
dead.append(e)
return jsonify({"name": name,
"nowConfirm": nowConfirm,
"confirm": confirm,
"heal": heal,
"dead": dead
})
@app.route("/theTop10CountriesGrewFastestInSevenDays")
def theTop10CountriesGrewFastestInSevenDays():
data = worldSQL.theTop10CountriesGrewFastestInSevenDays()
nation, \
day7, \
day, \
rate = [], [], [], []
for a, b, c, d in data:
nation.append(a)
day7.append(b)
day.append(c)
rate.append(d)
return jsonify({"nation": nation,
"day7": day7,
"day0": day,
"rate": rate
})
@app.route("/overseasCountriesWithMoreThan10000ConfirmedCases")
def overseasCountriesWithMoreThan10000ConfirmedCases():
data = worldSQL.overseasCountriesWithMoreThan10000ConfirmedCases()
foreignlist = []
for name, confirm in data:
foreignlist.append({"name": name, "value": confirm})
return jsonify({"data": foreignlist})
@app.route("/overseasCountriesWithMoreThan10000HaveBeenConfirmedCases")
def overseasCountriesWithMoreThan10000HaveBeenConfirmedCases():
data = worldSQL.overseasCountriesWithMoreThan10000HaveBeenConfirmedCases()
foreignlist = []
for name, nowConfirm in data:
foreignlist.append({"name": name, "value": nowConfirm})
return jsonify({"data": foreignlist})
@app.route("/newCasesInTheTop10CountriesWithin24Hours")
def newCasesInTheTop10CountriesWithin24Hours():
data = worldSQL.newCasesInTheTop10CountriesWithin24Hours()
nationAddConfirm = []
for nation, addConfirm in data:
nationAddConfirm.append({"name": nation, "value": addConfirm})
return jsonify({"data": nationAddConfirm})
@app.route("/theNumberOfForeignCountriesWithConfirmedCases")
def theNumberOfForeignCountriesWithConfirmedCases():
data = worldSQL.theNumberOfForeignCountriesWithConfirmedCases()
foreignlist = []
for continent, count in data:
foreignlist.append({"name": continent, "value": count})
return jsonify({"data": foreignlist})
if __name__ == '__main__':
app.run()
| none | 1 |  | 2.447662 | 2 |
T2API/migrations/0008_product_weight.py | hackhb18-T2/api | 0 | 10471 | # Generated by Django 2.0.2 on 2018-02-17 10:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('T2API', '0007_apiuser_deviceuser'),
]
operations = [
migrations.AddField(
model_name='product',
name='weight',
field=models.IntegerField(default=None, null=True),
),
]
| # Generated by Django 2.0.2 on 2018-02-17 10:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('T2API', '0007_apiuser_deviceuser'),
]
operations = [
migrations.AddField(
model_name='product',
name='weight',
field=models.IntegerField(default=None, null=True),
),
]
| en | 0.807271 | # Generated by Django 2.0.2 on 2018-02-17 10:50 | 1.635558 | 2 |
contrib/cirrus/podbot.py | juhp/libpod | 2 | 10472 | #!/usr/bin/env python3
# Simple and dumb script to send a message to the #podman IRC channel on frenode
# Based on example from: https://pythonspot.com/building-an-irc-bot/
import os
import time
import random
import errno
import socket
import sys
class IRC:
response_timeout = 10 # seconds
irc = socket.socket()
def __init__(self, server, nickname, channel):
self.server = server
self.nickname = nickname
self.channel = channel
self.irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def _send(self, cmdstr):
self.irc.send(bytes(cmdstr + '\r\n', 'utf-8'))
def message(self, msg):
data = 'PRIVMSG {0} :{1}\r\n'.format(self.channel, msg)
print(data)
self._send(data)
@staticmethod
def fix_newlines(bufr):
return bufr.replace('\\r\\n', '\n')
def _required_response(self, needle, haystack):
start = time.time()
end = start + self.response_timeout
while time.time() < end:
if haystack.find(needle) != -1:
return (False, haystack)
time.sleep(0.1)
try:
haystack += str(self.irc.recv(4096, socket.MSG_DONTWAIT))
except socket.error as serr:
if serr.errno == errno.EWOULDBLOCK:
continue
raise # can't handle this
return (True, haystack) # Error
def connect(self, username, password):
# This is ugly as sin, but seems to be a working send/expect sequence
print("connecting to: {0}".format(self.server))
self.irc.connect((self.server, 6667)) #connects to the server
self._send("USER {0} {0} {0} :I am {0}".format(self.nickname))
self._send("NICK {0}".format(self.nickname))
err, haystack = self._required_response('End of /MOTD command.'
''.format(self.nickname), "")
if err:
print(self.fix_newlines(haystack))
print("Error connecting to {0}".format(self.server))
return True
print("Logging in as {0}".format(username))
self._send("PRIVMSG NickServ :IDENTIFY {0} {1}".format(username, password))
err, _ = self._required_response("You are now identified for", "")
if err:
print("Error logging in to {0} as {1}".format(self.server, username))
return True
print("Joining {0}".format(self.channel))
self._send("JOIN {0}".format(self.channel))
err, haystack = self._required_response("{0} {1} :End of /NAMES list."
"".format(self.nickname, self.channel),
haystack)
print(self.fix_newlines(haystack))
if err:
print("Error joining {0}".format(self.channel))
return True
return False
def quit(self):
print("Quitting")
self._send("QUIT :my work is done here")
self.irc.close()
if len(sys.argv) < 3:
print("Error: Must pass desired nick and message as parameters")
else:
irc = IRC("irc.freenode.net", sys.argv[1], "#podman")
err = irc.connect(*os.environ.get('IRCID', 'Big Bug').split(" ", 2))
if not err:
irc.message(" ".join(sys.argv[2:]))
time.sleep(5.0) # avoid join/quit spam
irc.quit()
| #!/usr/bin/env python3
# Simple and dumb script to send a message to the #podman IRC channel on freenode
# Based on example from: https://pythonspot.com/building-an-irc-bot/
import os
import time
import random
import errno
import socket
import sys
class IRC:
response_timeout = 10 # seconds
irc = socket.socket()
def __init__(self, server, nickname, channel):
self.server = server
self.nickname = nickname
self.channel = channel
self.irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def _send(self, cmdstr):
self.irc.send(bytes(cmdstr + '\r\n', 'utf-8'))
def message(self, msg):
data = 'PRIVMSG {0} :{1}\r\n'.format(self.channel, msg)
print(data)
self._send(data)
@staticmethod
def fix_newlines(bufr):
return bufr.replace('\\r\\n', '\n')
def _required_response(self, needle, haystack):
start = time.time()
end = start + self.response_timeout
while time.time() < end:
if haystack.find(needle) != -1:
return (False, haystack)
time.sleep(0.1)
try:
haystack += str(self.irc.recv(4096, socket.MSG_DONTWAIT))
except socket.error as serr:
if serr.errno == errno.EWOULDBLOCK:
continue
raise # can't handle this
return (True, haystack) # Error
def connect(self, username, password):
# This is ugly as sin, but seems to be a working send/expect sequence
print("connecting to: {0}".format(self.server))
self.irc.connect((self.server, 6667)) #connects to the server
self._send("USER {0} {0} {0} :I am {0}".format(self.nickname))
self._send("NICK {0}".format(self.nickname))
err, haystack = self._required_response('End of /MOTD command.'
''.format(self.nickname), "")
if err:
print(self.fix_newlines(haystack))
print("Error connecting to {0}".format(self.server))
return True
print("Logging in as {0}".format(username))
self._send("PRIVMSG NickServ :IDENTIFY {0} {1}".format(username, password))
err, _ = self._required_response("You are now identified for", "")
if err:
print("Error logging in to {0} as {1}".format(self.server, username))
return True
print("Joining {0}".format(self.channel))
self._send("JOIN {0}".format(self.channel))
err, haystack = self._required_response("{0} {1} :End of /NAMES list."
"".format(self.nickname, self.channel),
haystack)
print(self.fix_newlines(haystack))
if err:
print("Error joining {0}".format(self.channel))
return True
return False
def quit(self):
print("Quitting")
self._send("QUIT :my work is done here")
self.irc.close()
if len(sys.argv) < 3:
print("Error: Must pass desired nick and message as parameters")
else:
irc = IRC("irc.freenode.net", sys.argv[1], "#podman")
err = irc.connect(*os.environ.get('IRCID', 'Big Bug').split(" ", 2))
if not err:
irc.message(" ".join(sys.argv[2:]))
time.sleep(5.0) # avoid join/quit spam
irc.quit()
| en | 0.89777 | #!/usr/bin/env python3 # Simple and dumb script to send a message to the #podman IRC channel on frenode # Based on example from: https://pythonspot.com/building-an-irc-bot/ # seconds # can't handle this # Error # This is ugly as sin, but seems to be a working send/expect sequence #connects to the server # avoid join/quit spam | 2.575957 | 3 |
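The podbot entry above hinges on one reusable idea: _required_response() keeps polling a non-blocking socket until an expected marker string shows up or a deadline passes. The helper below restates that pattern on its own, as a hedged sketch rather than part of the script: it mirrors the MSG_DONTWAIT/EWOULDBLOCK handling seen above, while the function name, timeout and decoding policy are illustrative choices.
import errno
import socket
import time

def wait_for_marker(sock, marker, timeout=10.0):
    """Poll a connected socket until `marker` appears or `timeout` seconds elapse.

    Returns (timed_out, buffered_text), matching the shape used by the script above.
    """
    buffered = ""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if marker in buffered:
            return False, buffered
        time.sleep(0.1)
        try:
            # Non-blocking read; raises EWOULDBLOCK while no data is pending.
            buffered += sock.recv(4096, socket.MSG_DONTWAIT).decode("utf-8", "replace")
        except socket.error as err:
            if err.errno == errno.EWOULDBLOCK:
                continue   # nothing to read yet, keep polling
            raise          # any other socket error is fatal
    return True, buffered  # deadline hit without seeing the marker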
changes/api/build_coverage.py | vault-the/changes | 443 | 10473 | <reponame>vault-the/changes
from changes.api.base import APIView
from changes.lib.coverage import get_coverage_by_build_id, merged_coverage_data
from changes.models.build import Build
class BuildTestCoverageAPIView(APIView):
def get(self, build_id):
build = Build.query.get(build_id)
if build is None:
return '', 404
coverage = merged_coverage_data(get_coverage_by_build_id(build.id))
return self.respond(coverage)
| from changes.api.base import APIView
from changes.lib.coverage import get_coverage_by_build_id, merged_coverage_data
from changes.models.build import Build
class BuildTestCoverageAPIView(APIView):
def get(self, build_id):
build = Build.query.get(build_id)
if build is None:
return '', 404
coverage = merged_coverage_data(get_coverage_by_build_id(build.id))
return self.respond(coverage) | none | 1 | 2.033235 | 2 |
|
topopt/mechanisms/problems.py | arnavbansal2764/topopt | 53 | 10474 | <reponame>arnavbansal2764/topopt<gh_stars>10-100
"""Compliant mechanism synthesis problems using topology optimization."""
import numpy
import scipy.sparse
from ..problems import ElasticityProblem
from .boundary_conditions import MechanismSynthesisBoundaryConditions
from ..utils import deleterowcol
class MechanismSynthesisProblem(ElasticityProblem):
r"""
Topology optimization problem to generate compliant mechanisms.
:math:`\begin{aligned}
\max_{\boldsymbol{\rho}} \quad &
\{u_{\text{out}}=\mathbf{l}^{T} \mathbf{u}\}\\
\textrm{subject to}: \quad & \mathbf{K}\mathbf{u} =
\mathbf{f}_\text{in}\\
& \sum_{e=1}^N v_e\rho_e \leq V_\text{frac},
\quad 0 < \rho_\min \leq \rho_e \leq 1,
\quad e=1, \dots, N.\\
\end{aligned}`
where :math:`\mathbf{l}` is a vector with the value 1 at the degree(s) of
freedom corresponding to the output point and with zeros at all other
places.
Attributes
----------
spring_stiffnesses: numpy.ndarray
The spring stiffnesses of the
actuator and output displacement.
Emin: float
The minimum stiffness of elements.
Emax: float
The maximum stiffness of elements.
"""
@staticmethod
def lk(E: float = 1.0, nu: float = 0.3) -> numpy.ndarray:
"""
Build the element stiffness matrix.
Parameters
----------
E:
Young's modulus of the material.
nu:
Poisson's ratio of the material.
Returns
-------
The element stiffness matrix for the material.
"""
return ElasticityProblem.lk(1e0, nu)
def __init__(
self, bc: MechanismSynthesisBoundaryConditions, penalty: float):
"""
Create the topology optimization problem.
Parameters
----------
nelx:
Number of elements in the x direction.
nely:
Number of elements in the x direction.
penalty:
Penalty value used to penalize fractional densities in SIMP.
bc:
Boundary conditions of the problem.
"""
super().__init__(bc, penalty)
self.Emin = 1e-6 # Minimum stiffness of elements
self.Emax = 1e2 # Maximum stiffness of elements
# Spring stiffnesses for the actuator and output displacement
self.spring_stiffnesses = numpy.full(
numpy.nonzero(self.f)[0].shape, 10.0)
def build_K(self, xPhys: numpy.ndarray, remove_constrained: bool = True
) -> scipy.sparse.coo.coo_matrix:
"""
Build the stiffness matrix for the problem.
Parameters
----------
xPhys:
The element densities used to build the stiffness matrix.
remove_constrained:
Should the constrained nodes be removed?
Returns
-------
The stiffness matrix for the mesh.
"""
# Build the stiffness matrix using inheritance
K = super().build_K(xPhys, remove_constrained=False).tocsc()
# Add spring stiffnesses
spring_ids = numpy.nonzero(self.f)[0]
K[spring_ids, spring_ids] += self.spring_stiffnesses
# K = (K.T + K) / 2. # Make sure the stiffness matrix is symmetric
# Remove constrained dofs from matrix and convert to coo
if remove_constrained:
K = deleterowcol(K, self.fixed, self.fixed)
return K.tocoo()
def compute_objective(self, xPhys: numpy.ndarray, dobj: numpy.ndarray
) -> float:
r"""
Compute the objective and gradient of the mechanism synthesis problem.
The objective is :math:`u_{\text{out}}=\mathbf{l}^{T} \mathbf{u}`
where :math:`\mathbf{l}` is a vector with the value 1 at
the degree(s) of freedom corresponding to the output point and with
zeros at all other places. The gradient of the objective is
:math:`\begin{align}
u_\text{out} &= \mathbf{l}^T\mathbf{u} = \mathbf{l}^T\mathbf{u} +
\boldsymbol{\lambda}^T(\mathbf{K}\mathbf{u} - \mathbf{f})\\
\frac{\partial u_\text{out}}{\partial \rho_e} &=
(\mathbf{K}\boldsymbol{\lambda} + \mathbf{l})^T
\frac{\partial \mathbf u}{\partial \rho_e} +
\boldsymbol{\lambda}^T\frac{\partial \mathbf K}{\partial \rho_e}
\mathbf{u}
= \boldsymbol{\lambda}^T\frac{\partial \mathbf K}{\partial \rho_e}
\mathbf{u}
\end{align}`
where :math:`\mathbf{K}\boldsymbol{\lambda} = -\mathbf{l}`.
Parameters
----------
xPhys:
The density design variables.
dobj:
The gradient of the objective to compute.
Returns
-------
The objective of the compliant mechanism synthesis problem.
"""
# Setup and solve FE problem
self.update_displacements(xPhys)
u = self.u[:, 0][self.edofMat].reshape(-1, 8) # Displacement
λ = self.u[:, 1][self.edofMat].reshape(-1, 8) # Fixed vector (Kλ = -l)
obj = self.f[:, 1].T @ self.u[:, 0]
self.obje[:] = (λ @ self.KE * u).sum(1)
self.compute_young_moduli(xPhys, dobj) # Stores the derivative in dobj
dobj *= -self.obje
return obj
| """Compliant mechanism synthesis problems using topology optimization."""
import numpy
import scipy.sparse
from ..problems import ElasticityProblem
from .boundary_conditions import MechanismSynthesisBoundaryConditions
from ..utils import deleterowcol
class MechanismSynthesisProblem(ElasticityProblem):
r"""
Topology optimization problem to generate compliant mechanisms.
:math:`\begin{aligned}
\max_{\boldsymbol{\rho}} \quad &
\{u_{\text{out}}=\mathbf{l}^{T} \mathbf{u}\}\\
\textrm{subject to}: \quad & \mathbf{K}\mathbf{u} =
\mathbf{f}_\text{in}\\
& \sum_{e=1}^N v_e\rho_e \leq V_\text{frac},
\quad 0 < \rho_\min \leq \rho_e \leq 1,
\quad e=1, \dots, N.\\
\end{aligned}`
where :math:`\mathbf{l}` is a vector with the value 1 at the degree(s) of
freedom corresponding to the output point and with zeros at all other
places.
Attributes
----------
spring_stiffnesses: numpy.ndarray
The spring stiffnesses of the
actuator and output displacement.
Emin: float
The minimum stiffness of elements.
Emax: float
The maximum stiffness of elements.
"""
@staticmethod
def lk(E: float = 1.0, nu: float = 0.3) -> numpy.ndarray:
"""
Build the element stiffness matrix.
Parameters
----------
E:
Young's modulus of the material.
nu:
Poisson's ratio of the material.
Returns
-------
The element stiffness matrix for the material.
"""
return ElasticityProblem.lk(1e0, nu)
def __init__(
self, bc: MechanismSynthesisBoundaryConditions, penalty: float):
"""
Create the topology optimization problem.
Parameters
----------
nelx:
Number of elements in the x direction.
nely:
Number of elements in the x direction.
penalty:
Penalty value used to penalize fractional densities in SIMP.
bc:
Boundary conditions of the problem.
"""
super().__init__(bc, penalty)
self.Emin = 1e-6 # Minimum stiffness of elements
self.Emax = 1e2 # Maximum stiffness of elements
# Spring stiffnesses for the actuator and output displacement
self.spring_stiffnesses = numpy.full(
numpy.nonzero(self.f)[0].shape, 10.0)
def build_K(self, xPhys: numpy.ndarray, remove_constrained: bool = True
) -> scipy.sparse.coo.coo_matrix:
"""
Build the stiffness matrix for the problem.
Parameters
----------
xPhys:
The element densities used to build the stiffness matrix.
remove_constrained:
Should the constrained nodes be removed?
Returns
-------
The stiffness matrix for the mesh.
"""
# Build the stiffness matrix using inheritance
K = super().build_K(xPhys, remove_constrained=False).tocsc()
# Add spring stiffnesses
spring_ids = numpy.nonzero(self.f)[0]
K[spring_ids, spring_ids] += self.spring_stiffnesses
# K = (K.T + K) / 2. # Make sure the stiffness matrix is symmetric
# Remove constrained dofs from matrix and convert to coo
if remove_constrained:
K = deleterowcol(K, self.fixed, self.fixed)
return K.tocoo()
def compute_objective(self, xPhys: numpy.ndarray, dobj: numpy.ndarray
) -> float:
r"""
Compute the objective and gradient of the mechanism synthesis problem.
The objective is :math:`u_{\text{out}}=\mathbf{l}^{T} \mathbf{u}`
where :math:`\mathbf{l}` is a vector with the value 1 at
the degree(s) of freedom corresponding to the output point and with
zeros at all other places. The gradient of the objective is
:math:`\begin{align}
u_\text{out} &= \mathbf{l}^T\mathbf{u} = \mathbf{l}^T\mathbf{u} +
\boldsymbol{\lambda}^T(\mathbf{K}\mathbf{u} - \mathbf{f})\\
\frac{\partial u_\text{out}}{\partial \rho_e} &=
(\mathbf{K}\boldsymbol{\lambda} + \mathbf{l})^T
\frac{\partial \mathbf u}{\partial \rho_e} +
\boldsymbol{\lambda}^T\frac{\partial \mathbf K}{\partial \rho_e}
\mathbf{u}
= \boldsymbol{\lambda}^T\frac{\partial \mathbf K}{\partial \rho_e}
\mathbf{u}
\end{align}`
where :math:`\mathbf{K}\boldsymbol{\lambda} = -\mathbf{l}`.
Parameters
----------
xPhys:
The density design variables.
dobj:
The gradient of the objective to compute.
Returns
-------
The objective of the compliant mechanism synthesis problem.
"""
# Setup and solve FE problem
self.update_displacements(xPhys)
u = self.u[:, 0][self.edofMat].reshape(-1, 8) # Displacement
λ = self.u[:, 1][self.edofMat].reshape(-1, 8) # Fixed vector (Kλ = -l)
obj = self.f[:, 1].T @ self.u[:, 0]
self.obje[:] = (λ @ self.KE * u).sum(1)
self.compute_young_moduli(xPhys, dobj) # Stores the derivative in dobj
dobj *= -self.obje
return obj | en | 0.618272 | Compliant mechanism synthesis problems using topology optimization. Topology optimization problem to generate compliant mechanisms. :math:`\begin{aligned} \max_{\boldsymbol{\rho}} \quad & \{u_{\text{out}}=\mathbf{l}^{T} \mathbf{u}\}\\ \textrm{subject to}: \quad & \mathbf{K}\mathbf{u} = \mathbf{f}_\text{in}\\ & \sum_{e=1}^N v_e\rho_e \leq V_\text{frac}, \quad 0 < \rho_\min \leq \rho_e \leq 1, \quad e=1, \dots, N.\\ \end{aligned}` where :math:`\mathbf{l}` is a vector with the value 1 at the degree(s) of freedom corresponding to the output point and with zeros at all other places. Attributes ---------- spring_stiffnesses: numpy.ndarray The spring stiffnesses of the actuator and output displacement. Emin: float The minimum stiffness of elements. Emax: float The maximum stiffness of elements. Build the element stiffness matrix. Parameters ---------- E: Young's modulus of the material. nu: Poisson's ratio of the material. Returns ------- The element stiffness matrix for the material. Create the topology optimization problem. Parameters ---------- nelx: Number of elements in the x direction. nely: Number of elements in the x direction. penalty: Penalty value used to penalize fractional densities in SIMP. bc: Boundary conditions of the problem. # Minimum stiffness of elements # Maximum stiffness of elements # Spring stiffnesses for the actuator and output displacement Build the stiffness matrix for the problem. Parameters ---------- xPhys: The element densisities used to build the stiffness matrix. remove_constrained: Should the constrained nodes be removed? Returns ------- The stiffness matrix for the mesh. # Build the stiffness matrix using inheritance # Add spring stiffnesses # K = (K.T + K) / 2. # Make sure the stiffness matrix is symmetric # Remove constrained dofs from matrix and convert to coo Compute the objective and gradient of the mechanism synthesis problem. The objective is :math:`u_{\text{out}}=\mathbf{l}^{T} \mathbf{u}` where :math:`\mathbf{l}` is a vector with the value 1 at the degree(s) of freedom corresponding to the output point and with zeros at all other places. The gradient of the objective is :math:`\begin{align} u_\text{out} &= \mathbf{l}^T\mathbf{u} = \mathbf{l}^T\mathbf{u} + \boldsymbol{\lambda}^T(\mathbf{K}\mathbf{u} - \mathbf{f})\\ \frac{\partial u_\text{out}}{\partial \rho_e} &= (\mathbf{K}\boldsymbol{\lambda} + \mathbf{l})^T \frac{\partial \mathbf u}{\partial \rho_e} + \boldsymbol{\lambda}^T\frac{\partial \mathbf K}{\partial \rho_e} \mathbf{u} = \boldsymbol{\lambda}^T\frac{\partial \mathbf K}{\partial \rho_e} \mathbf{u} \end{align}` where :math:`\mathbf{K}\boldsymbol{\lambda} = -\mathbf{l}`. Parameters ---------- xPhys: The density design variables. dobj: The gradient of the objective to compute. Returns ------- The objective of the compliant mechanism synthesis problem. # Setup and solve FE problem # Displacement # Fixed vector (Kλ = -l) # Stores the derivative in dobj | 2.767374 | 3 |
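The MechanismSynthesisProblem docstrings above carry the essential derivation: solve the adjoint system K·λ = -l once, and the sensitivity of the output displacement reduces to λᵀ(∂K/∂ρₑ)u. The standalone script below checks that identity numerically on a toy one-dimensional spring chain; the miniature assembly routine and the load/output vectors are invented purely for this illustration and are not the topopt package's finite-element model.
import numpy as np

def K_of_rho(rho):
    # Assemble the reduced stiffness matrix of a chain of springs with
    # stiffnesses rho[e]; node 0 is fixed, leaving nodes 1..n as free dofs.
    n = len(rho)
    K = np.zeros((n + 1, n + 1))
    ke = np.array([[1.0, -1.0], [-1.0, 1.0]])
    for e, r in enumerate(rho):
        K[e:e + 2, e:e + 2] += r * ke
    return K[1:, 1:]

def dK_drho(e, n):
    # Derivative of the assembled matrix with respect to one element stiffness.
    dK = np.zeros((n + 1, n + 1))
    dK[e:e + 2, e:e + 2] += np.array([[1.0, -1.0], [-1.0, 1.0]])
    return dK[1:, 1:]

rho = np.array([1.0, 0.7, 0.4, 0.9])
n = len(rho)
f = np.zeros(n); f[0] = 1.0    # "input actuator" load
l = np.zeros(n); l[-1] = 1.0   # selector vector, u_out = l @ u

K = K_of_rho(rho)
u = np.linalg.solve(K, f)
lam = np.linalg.solve(K, -l)   # adjoint solve (K is symmetric)

adjoint_grad = np.array([lam @ dK_drho(e, n) @ u for e in range(n)])

# Independent reference: forward finite differences of u_out = l @ u.
eps = 1e-6
fd_grad = np.empty(n)
for e in range(n):
    rho_p = rho.copy(); rho_p[e] += eps
    fd_grad[e] = (l @ np.linalg.solve(K_of_rho(rho_p), f) - l @ u) / eps

print(np.allclose(adjoint_grad, fd_grad, atol=1e-4))   # expected: True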
tests/test_parse_icao24bit.py | Collen-Roller/arp | 2 | 10475 | import unittest
from flydenity import Parser
class TestParseIcao24Bit(unittest.TestCase):
def setUp(self):
self.parser = Parser()
def test_parse_simple(self):
match = self.parser.parse("3D2591", icao24bit=True)
self.assertEqual(match, {"nation": "Germany", "description": "general", "iso2": "DE", "iso3": "DEU"})
def test_parse_strict(self):
sloppy_reg_sloppy_parser = self.parser.parse("3DX", icao24bit=True, strict=False)
sloppy_reg_strict_parser = self.parser.parse("3DX", icao24bit=True, strict=True)
strict_reg_sloppy_parser = self.parser.parse("3D2591", icao24bit=True, strict=False)
strict_reg_strict_parser = self.parser.parse("3D2591", icao24bit=True, strict=True)
self.assertTrue(sloppy_reg_sloppy_parser == strict_reg_sloppy_parser == strict_reg_strict_parser != sloppy_reg_strict_parser)
if __name__ == "__main__":
unittest.main()
| import unittest
from flydenity import Parser
class TestParseIcao24Bit(unittest.TestCase):
def setUp(self):
self.parser = Parser()
def test_parse_simple(self):
match = self.parser.parse("3D2591", icao24bit=True)
self.assertEqual(match, {"nation": "Germany", "description": "general", "iso2": "DE", "iso3": "DEU"})
def test_parse_strict(self):
sloppy_reg_sloppy_parser = self.parser.parse("3DX", icao24bit=True, strict=False)
sloppy_reg_strict_parser = self.parser.parse("3DX", icao24bit=True, strict=True)
strict_reg_sloppy_parser = self.parser.parse("3D2591", icao24bit=True, strict=False)
strict_reg_strict_parser = self.parser.parse("3D2591", icao24bit=True, strict=True)
self.assertTrue(sloppy_reg_sloppy_parser == strict_reg_sloppy_parser == strict_reg_strict_parser != sloppy_reg_strict_parser)
if __name__ == "__main__":
unittest.main()
| none | 1 | 3.119133 | 3 |
|
ever/util/_main.py | Bobholamovic/ever | 22 | 10476 | import os
def create_project(path):
dirs = ['configs', 'module', 'data']
dirs = [os.path.join(path, d) for d in dirs]
for d in dirs:
os.makedirs(d)
train_script = r"""
import ever as er
def train(trainer_name):
trainer = er.trainer.get_trainer(trainer_name)()
trainer.run()
"""
with open(os.path.join(path, 'train.py'), 'w') as f:
f.write(train_script)
print('created project in {}'.format(path))
| import os
def create_project(path):
dirs = ['configs', 'module', 'data']
dirs = [os.path.join(path, d) for d in dirs]
for d in dirs:
os.makedirs(d)
train_script = r"""
import ever as er
def train(trainer_name):
trainer = er.trainer.get_trainer(trainer_name)()
trainer.run()
"""
with open(os.path.join(path, 'train.py'), 'w') as f:
f.write(train_script)
print('created project in {}'.format(path))
| en | 0.677289 | import ever as er def train(trainer_name): trainer = er.trainer.get_trainer(trainer_name)() trainer.run() | 2.599186 | 3 |
src/app/services/metrics_service.py | chrisbpoint/the-app | 0 | 10477 | class MetricsService:
def __init__(self, adc_data, metrics_data):
self._adc_data = adc_data
self._metrics_data = metrics_data
@property
def metrics_data(self):
return self._metrics_data
def update(self):
self._metrics_data.is_new_data_available = False
if self._adc_data.is_new_data_available:
self._metrics_data.update(self._adc_data.trace)
self._metrics_data.is_new_data_available = True
| class MetricsService:
def __init__(self, adc_data, metrics_data):
self._adc_data = adc_data
self._metrics_data = metrics_data
@property
def metrics_data(self):
return self._metrics_data
def update(self):
self._metrics_data.is_new_data_available = False
if self._adc_data.is_new_data_available:
self._metrics_data.update(self._adc_data.trace)
self._metrics_data.is_new_data_available = True
| none | 1 | 2.641519 | 3 |
|
resthelper/tests/test_build_url.py | rklonner/resthelper | 0 | 10478 | import unittest
from resthelper.utils import build_restful_url
class TestBuildUrl(unittest.TestCase):
def test_is_restful_https_url(self):
url = build_restful_url('https://jenkins1.tttech.com',
'testuser', '/rest/1.0/request')
self.assertEqual(url,
'https://testuser@jenkins1.tttech.com/rest/1.0/request')
def test_is_restful_http_url(self):
url = build_restful_url('http://jenkins1.tttech.com',
'testuser', '/rest/1.0/request')
self.assertEqual(url,
'http://testuser@jenkins1.tttech.com/rest/1.0/request')
if __name__ == '__main__':
unittest.main() | import unittest
from resthelper.utils import build_restful_url
class TestBuildUrl(unittest.TestCase):
def test_is_restful_https_url(self):
url = build_restful_url('https://jenkins1.tttech.com',
'testuser', '/rest/1.0/request')
self.assertEqual(url,
'https://testuser@jenkins1.tttech.com/rest/1.0/request')
def test_is_restful_http_url(self):
url = build_restful_url('http://jenkins1.tttech.com',
'testuser', '/rest/1.0/request')
self.assertEqual(url,
'http://testuser@jenkins1.tttech.com/rest/1.0/request')
if __name__ == '__main__':
unittest.main() | none | 1 | 3.033438 | 3 |
|
sendsms/backends/rq.py | this-is-the-bard/django-sendsms | 0 | 10479 | """ python-rq based backend
This backend will send your messages asynchronously with python-rq.
Before using this backend, make sure that django-rq is installed and
configured.
Usage
-----
In settings.py
SENDSMS_BACKEND = 'sendsms.backends.rq.SmsBackend'
RQ_SENDSMS_BACKEND = 'actual.backend.to.use.SmsBackend'
"""
from sendsms.api import get_connection
from sendsms.backends.base import BaseSmsBackend
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django_rq import job
RQ_SENDSMS_BACKEND = getattr(settings, 'RQ_SENDSMS_BACKEND', None)
if not RQ_SENDSMS_BACKEND:
raise ImproperlyConfigured('Set RQ_SENDSMS_BACKEND')
@job
def send_messages(messages):
connection = get_connection(RQ_SENDSMS_BACKEND)
connection.send_messages(messages)
class SmsBackend(BaseSmsBackend):
def send_messages(self, messages):
send_messages.delay(messages)
| """ python-rq based backend
This backend will send your messages asynchronously with python-rq.
Before using this backend, make sure that django-rq is installed and
configured.
Usage
-----
In settings.py
SENDSMS_BACKEND = 'sendsms.backends.rq.SmsBackend'
RQ_SENDSMS_BACKEND = 'actual.backend.to.use.SmsBackend'
"""
from sendsms.api import get_connection
from sendsms.backends.base import BaseSmsBackend
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django_rq import job
RQ_SENDSMS_BACKEND = getattr(settings, 'RQ_SENDSMS_BACKEND', None)
if not RQ_SENDSMS_BACKEND:
raise ImproperlyConfigured('Set RQ_SENDSMS_BACKEND')
@job
def send_messages(messages):
connection = get_connection(RQ_SENDSMS_BACKEND)
connection.send_messages(messages)
class SmsBackend(BaseSmsBackend):
def send_messages(self, messages):
send_messages.delay(messages)
| en | 0.642827 | python-rq based backend This backend will send your messages asynchronously with python-rq. Before using this backend, make sure that django-rq is installed and configured. Usage ----- In settings.py SENDSMS_BACKEND = 'sendsms.backends.rq.SmsBackend' RQ_SENDSMS_BACKEND = 'actual.backend.to.use.SmsBackend' | 2.393128 | 2 |
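The usage notes in the sendsms entry above name the two settings but stop short of a complete configuration. A hedged settings.py sketch follows: the RQ_SENDSMS_BACKEND path is a made-up placeholder for whatever synchronous backend actually delivers the messages, and the RQ_QUEUES block follows django-rq's documented format rather than anything defined in this file.
# settings.py (sketch)
SENDSMS_BACKEND = "sendsms.backends.rq.SmsBackend"       # route all sends through the queue
RQ_SENDSMS_BACKEND = "myproject.sms.TwilioSmsBackend"    # hypothetical delivering backend

# django-rq needs at least one queue; the bare @job decorator above targets "default".
RQ_QUEUES = {
    "default": {
        "HOST": "localhost",
        "PORT": 6379,
        "DB": 0,
    },
}
With that in place, SmsBackend.send_messages() returns as soon as the job is enqueued, and an rq worker process (for example one started via django-rq's rqworker management command) performs the real delivery through RQ_SENDSMS_BACKEND.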
venv/Lib/site-packages/openpyxl/worksheet/errors.py | ajayiagbebaku/NFL-Model | 5,079 | 10480 | <gh_stars>1000+
#Autogenerated schema
from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.descriptors import (
Typed,
String,
Bool,
Sequence,
)
from openpyxl.descriptors.excel import CellRange
class Extension(Serialisable):
tagname = "extension"
uri = String(allow_none=True)
def __init__(self,
uri=None,
):
self.uri = uri
class ExtensionList(Serialisable):
tagname = "extensionList"
# uses element group EG_ExtensionList
ext = Sequence(expected_type=Extension)
__elements__ = ('ext',)
def __init__(self,
ext=(),
):
self.ext = ext
class IgnoredError(Serialisable):
tagname = "ignoredError"
sqref = CellRange
evalError = Bool(allow_none=True)
twoDigitTextYear = Bool(allow_none=True)
numberStoredAsText = Bool(allow_none=True)
formula = Bool(allow_none=True)
formulaRange = Bool(allow_none=True)
unlockedFormula = Bool(allow_none=True)
emptyCellReference = Bool(allow_none=True)
listDataValidation = Bool(allow_none=True)
calculatedColumn = Bool(allow_none=True)
def __init__(self,
sqref=None,
evalError=False,
twoDigitTextYear=False,
numberStoredAsText=False,
formula=False,
formulaRange=False,
unlockedFormula=False,
emptyCellReference=False,
listDataValidation=False,
calculatedColumn=False,
):
self.sqref = sqref
self.evalError = evalError
self.twoDigitTextYear = twoDigitTextYear
self.numberStoredAsText = numberStoredAsText
self.formula = formula
self.formulaRange = formulaRange
self.unlockedFormula = unlockedFormula
self.emptyCellReference = emptyCellReference
self.listDataValidation = listDataValidation
self.calculatedColumn = calculatedColumn
class IgnoredErrors(Serialisable):
tagname = "ignoredErrors"
ignoredError = Sequence(expected_type=IgnoredError)
extLst = Typed(expected_type=ExtensionList, allow_none=True)
__elements__ = ('ignoredError', 'extLst')
def __init__(self,
ignoredError=(),
extLst=None,
):
self.ignoredError = ignoredError
self.extLst = extLst
| #Autogenerated schema
from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.descriptors import (
Typed,
String,
Bool,
Sequence,
)
from openpyxl.descriptors.excel import CellRange
class Extension(Serialisable):
tagname = "extension"
uri = String(allow_none=True)
def __init__(self,
uri=None,
):
self.uri = uri
class ExtensionList(Serialisable):
tagname = "extensionList"
# uses element group EG_ExtensionList
ext = Sequence(expected_type=Extension)
__elements__ = ('ext',)
def __init__(self,
ext=(),
):
self.ext = ext
class IgnoredError(Serialisable):
tagname = "ignoredError"
sqref = CellRange
evalError = Bool(allow_none=True)
twoDigitTextYear = Bool(allow_none=True)
numberStoredAsText = Bool(allow_none=True)
formula = Bool(allow_none=True)
formulaRange = Bool(allow_none=True)
unlockedFormula = Bool(allow_none=True)
emptyCellReference = Bool(allow_none=True)
listDataValidation = Bool(allow_none=True)
calculatedColumn = Bool(allow_none=True)
def __init__(self,
sqref=None,
evalError=False,
twoDigitTextYear=False,
numberStoredAsText=False,
formula=False,
formulaRange=False,
unlockedFormula=False,
emptyCellReference=False,
listDataValidation=False,
calculatedColumn=False,
):
self.sqref = sqref
self.evalError = evalError
self.twoDigitTextYear = twoDigitTextYear
self.numberStoredAsText = numberStoredAsText
self.formula = formula
self.formulaRange = formulaRange
self.unlockedFormula = unlockedFormula
self.emptyCellReference = emptyCellReference
self.listDataValidation = listDataValidation
self.calculatedColumn = calculatedColumn
class IgnoredErrors(Serialisable):
tagname = "ignoredErrors"
ignoredError = Sequence(expected_type=IgnoredError)
extLst = Typed(expected_type=ExtensionList, allow_none=True)
__elements__ = ('ignoredError', 'extLst')
def __init__(self,
ignoredError=(),
extLst=None,
):
self.ignoredError = ignoredError
self.extLst = extLst | en | 0.501669 | #Autogenerated schema # uses element group EG_ExtensionList | 2.477595 | 2 |
cwbot/kolextra/request/ItemDescriptionRequest.py | zeryl/RUcwbot | 0 | 10481 | <gh_stars>0
from kol.request.GenericRequest import GenericRequest
from kol.manager import PatternManager
import re
class ItemDescriptionRequest(GenericRequest):
"Gets the description of an item and then parses various information from the response."
_itemIdPattern = re.compile(r'(?i)<!--\s*itemid:\s*(\d+)\s*-->')
def __init__(self, session, descId):
super(ItemDescriptionRequest, self).__init__(session)
self.url = session.serverURL + "desc_item.php?whichitem=%s" % descId
def parseResponse(self):
# Get the item name.
itemNamePattern = PatternManager.getOrCompilePattern("itemName")
match = itemNamePattern.search(self.responseText)
self.responseData["name"] = match.group(1)
# Get the item image.
imagePattern = PatternManager.getOrCompilePattern("itemImage")
match = imagePattern.search(self.responseText)
self.responseData["image"] = match.group(1)
# Get the item type.
typePattern = PatternManager.getOrCompilePattern("itemType")
match = typePattern.search(self.responseText)
if match:
self.responseData["type"] = match.group(1).rstrip()
# Get the autosell value.
autosellPattern = PatternManager.getOrCompilePattern("itemAutosell")
match = autosellPattern.search(self.responseText)
if match:
self.responseData["autosell"] = int(match.group(1))
else:
self.responseData["autosell"] = 0
# See if this is a cooking ingredient.
cookingPattern = PatternManager.getOrCompilePattern("isCookingIngredient")
match = cookingPattern.search(self.responseText)
if match:
self.responseData["isCookingIngredient"] = True
# See if the item is a cocktailcrafting ingredient.
cocktailcraftingPattern = PatternManager.getOrCompilePattern("isCocktailcraftingIngredient")
match = cocktailcraftingPattern.search(self.responseText)
if match:
self.responseData["isCocktailcraftingIngredient"] = True
# See if the item is a meatsmithing component.
meatsmithingPattern = PatternManager.getOrCompilePattern("isMeatsmithingComponent")
match = meatsmithingPattern.search(self.responseText)
if match:
self.responseData["isMeatsmithingComponent"] = True
# See if the item is a jewelrymaking component.
jewelrymakingPattern = PatternManager.getOrCompilePattern("isJewelrymakingComponent")
match = jewelrymakingPattern.search(self.responseText)
if match:
self.responseData["isJewelrymakingComponent"] = True
# See if the itemId is listed
match = self._itemIdPattern.search(self.responseText)
if match:
self.responseData["id"] = int(match.group(1))
else:
self.responseData["id"] = None
| from kol.request.GenericRequest import GenericRequest
from kol.manager import PatternManager
import re
class ItemDescriptionRequest(GenericRequest):
"Gets the description of an item and then parses various information from the response."
_itemIdPattern = re.compile(r'(?i)<!--\s*itemid:\s*(\d+)\s*-->')
def __init__(self, session, descId):
super(ItemDescriptionRequest, self).__init__(session)
self.url = session.serverURL + "desc_item.php?whichitem=%s" % descId
def parseResponse(self):
# Get the item name.
itemNamePattern = PatternManager.getOrCompilePattern("itemName")
match = itemNamePattern.search(self.responseText)
self.responseData["name"] = match.group(1)
# Get the item image.
imagePattern = PatternManager.getOrCompilePattern("itemImage")
match = imagePattern.search(self.responseText)
self.responseData["image"] = match.group(1)
# Get the item type.
typePattern = PatternManager.getOrCompilePattern("itemType")
match = typePattern.search(self.responseText)
if match:
self.responseData["type"] = match.group(1).rstrip()
# Get the autosell value.
autosellPattern = PatternManager.getOrCompilePattern("itemAutosell")
match = autosellPattern.search(self.responseText)
if match:
self.responseData["autosell"] = int(match.group(1))
else:
self.responseData["autosell"] = 0
# See if this is a cooking ingredient.
cookingPattern = PatternManager.getOrCompilePattern("isCookingIngredient")
match = cookingPattern.search(self.responseText)
if match:
self.responseData["isCookingIngredient"] = True
# See if the item is a cocktailcrafting ingredient.
cocktailcraftingPattern = PatternManager.getOrCompilePattern("isCocktailcraftingIngredient")
match = cocktailcraftingPattern.search(self.responseText)
if match:
self.responseData["isCocktailcraftingIngredient"] = True
# See if the item is a meatsmithing component.
meatsmithingPattern = PatternManager.getOrCompilePattern("isMeatsmithingComponent")
match = meatsmithingPattern.search(self.responseText)
if match:
self.responseData["isMeatsmithingComponent"] = True
# See if the item is a jewelrymaking component.
jewelrymakingPattern = PatternManager.getOrCompilePattern("isJewelrymakingComponent")
match = jewelrymakingPattern.search(self.responseText)
if match:
self.responseData["isJewelrymakingComponent"] = True
# See if the itemId is listed
match = self._itemIdPattern.search(self.responseText)
if match:
self.responseData["id"] = int(match.group(1))
else:
self.responseData["id"] = None | en | 0.486876 | # Get the item name. # Get the item image. # Get the item type. # Get the autosell value. # See if this is a cooking ingredient. # See if the item is a cocktailcrafting ingredient. # See if the item is a meatsmithing component. # See if the item is a jewelrymaking component. # See if the itemId is listed | 2.571438 | 3 |
SmartMove/SmartConnector/cpapi/utils.py | themichaelasher/SmartMove | 24 | 10482 | import json
import sys
def compatible_loads(json_data):
"""
Function json.loads in python 3.0 - 3.5 can't handle bytes, so this function handles it.
:param json_data:
:return: unicode (str if it's python 3)
"""
if isinstance(json_data, bytes) and (3, 0) <= sys.version_info < (3, 6):
json_data = json_data.decode("utf-8")
return json.loads(json_data)
def get_massage_from_io_error(error):
"""
:param: IOError
:return: error message
"""
if sys.version_info >= (3, 0):
return error.strerror
else:
return error.message
| import json
import sys
def compatible_loads(json_data):
"""
Function json.loads in python 3.0 - 3.5 can't handle bytes, so this function handles it.
:param json_data:
:return: unicode (str if it's python 3)
"""
if isinstance(json_data, bytes) and (3, 0) <= sys.version_info < (3, 6):
json_data = json_data.decode("utf-8")
return json.loads(json_data)
def get_massage_from_io_error(error):
"""
:param: IOError
:return: error message
"""
if sys.version_info >= (3, 0):
return error.strerror
else:
return error.message
| en | 0.390105 | Function json.loads in python 3.0 - 3.5 can't handle bytes, so this function handle it.
:param json_data:
:return: unicode (str if it's python 3) :param: IOError
:return: error message | 3.056069 | 3 |
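As a quick illustration of the compatibility note in the docstring above (json.loads() rejected bytes objects on Python 3.0-3.5, so the payload has to be decoded first on those versions), the self-contained snippet below repeats the helper and feeds it both a bytes and a str payload; the sample JSON body is made up.
import json
import sys

def compatible_loads(json_data):
    # Decode bytes only on the interpreter versions whose json.loads() cannot.
    if isinstance(json_data, bytes) and (3, 0) <= sys.version_info < (3, 6):
        json_data = json_data.decode("utf-8")
    return json.loads(json_data)

raw = b'{"task-id": "0123-abcd"}'        # e.g. a response body read off a socket
print(compatible_loads(raw))             # {'task-id': '0123-abcd'}
print(compatible_loads(raw.decode()))    # str input behaves the same on every version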
VokeScan.py | DaduVoke/VokeScan | 2 | 10483 | import sys,time
def sprint(str):
for c in str + '\n':
sys.stdout.write(c)
sys.stdout.flush()
time.sleep(3./90)
from colorama import Fore, Back, Style
sprint (Fore.RED + "გამარჯობა. tool-ი შექმინლია ლევან ყიფიანი-DaduVoke-ის მიერ @2021")
import socket
import _thread
import time
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class Core(object):
ipurl=0
mode=1024
menu1=False
f=None
network_speed="სიჩქარე"
menu2=False
def GetData(self, url):
self.url = url
try:
self.ipurl = socket.gethostbyname(self.url)
except Exception as e:
print ("თქვენ არასწორად შეიყვანეთ IP ან URL")
exit(0)
Core.ipurl=self.ipurl
print (22*" ",bcolors.OKGREEN,"=/=\=\=/=\=/=\=/=\=/=\=/=\=/=\=/=\=/VokeScaner=/=\=\=/=\=/=\=/=\=/=\=/=\=/=\=/=\=",bcolors.OKGREEN)
sprint('გთხოვთ აირჩიოთ 1 ან 2')
while Core.menu1 is not True:
choice = input("\n1 - მოკლე\n2 - გრძელი\n")
if choice == "1":
Core.mode=1024
menu=True
break
elif choice == "2":
Core.mode=64000
menu = True
break
else:
sprint("გთხოვთ აირჩიოთ პირველი ან მეორე. პროგრამის გასაშვებად ტერმინალში გამოიყენეთ ბრძანება 1 ან 2")
while Core.menu2 is not True:
sprint("მეორე ეტაპი! გთხოვთ აირჩიოთ გამოყენებული ინტერნეტის სიჩქარე (0.05(1) 0.03(2))")
choice = input("\n1 - მოკლე \n2 - გრძელი\n")
if choice == "1":
Core.network_speed=0.05
menu2=True
break
elif choice == "2":
Core.network_speed=0.3
menu2 = True
break
else:
print("გთხოვთ აირჩიოთ პირველი ან მეორე. პროგრამის გასაშვებად ტერმინალში გამოიყენეთ ბრძანება 1 ან 2")
def Start_Scan(self, port_start, port_end):
Core.f = open(Core.ipurl, "a")
try:
for x in range(port_start,port_end):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
res = sock.connect_ex((Core.ipurl,x))
if res == 0:
tmp="პორტი",x,"გახსნილია", socket.getservbyport(x)
tmp1=str(tmp[0])+" "+str(tmp[1])+" "+str(tmp[2])+" "+str(tmp[3])
print(bcolors.OKGREEN,tmp1)
Core.f.write(str(tmp)+"\n")
Core.f.close()
except Exception as e:
print (e)
try:
scan = Core()
scan.GetData(input("ჩაწერეთ IP ან მისამართი URL\n"))
print(bcolors.WARNING,"სიხშირე:",Core.mode,"\n სამიზნე:",Core.ipurl,"\n სკანერის სიჩქარე:",Core.network_speed,bcolors.ENDC)
print(bcolors.BOLD,"გთხოვთ დაიცადოთ რამდენიმე წამი...",bcolors.ENDC)
for count in range(0,Core.mode):
time.sleep(Core.network_speed)
_thread.start_new_thread(scan.Start_Scan, (count,count+1))
if count > Core.mode:
exit(0)
except Exception as e:
print (e)
| import sys,time
def sprint(str):
for c in str + '\n':
sys.stdout.write(c)
sys.stdout.flush()
time.sleep(3./90)
from colorama import Fore, Back, Style
sprint (Fore.RED + "გამარჯობა. tool-ი შექმინლია ლევან ყიფიანი-DaduVoke-ის მიერ @2021")
import socket
import _thread
import time
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class Core(object):
ipurl=0
mode=1024
menu1=False
f=None
network_speed="სიჩქარე"
menu2=False
def GetData(self, url):
self.url = url
try:
self.ipurl = socket.gethostbyname(self.url)
except Exception as e:
print ("თქვენ არასწორად შეიყვანეთ IP ან URL")
exit(0)
Core.ipurl=self.ipurl
print (22*" ",bcolors.OKGREEN,"=/=\=\=/=\=/=\=/=\=/=\=/=\=/=\=/=\=/VokeScaner=/=\=\=/=\=/=\=/=\=/=\=/=\=/=\=/=\=",bcolors.OKGREEN)
sprint('გთხოვთ აირჩიოთ 1 ან 2')
while Core.menu1 is not True:
choice = input("\n1 - მოკლე\n2 - გრძელი\n")
if choice == "1":
Core.mode=1024
menu=True
break
elif choice == "2":
Core.mode=64000
menu = True
break
else:
sprint("გთხოვთ აირჩიოთ პირველი ან მეორე. პროგრამის გასაშვებად ტერმინალში გამოიყენეთ ბრძანება 1 ან 2")
while Core.menu2 is not True:
sprint("მეორე ეტაპი! გთხოვთ აირჩიოთ გამოყენებული ინტერნეტის სიჩქარე (0.05(1) 0.03(2))")
choice = input("\n1 - მოკლე \n2 - გრძელი\n")
if choice == "1":
Core.network_speed=0.05
menu2=True
break
elif choice == "2":
Core.network_speed=0.3
menu2 = True
break
else:
print("გთხოვთ აირჩიოთ პირველი ან მეორე. პროგრამის გასაშვებად ტერმინალში გამოიყენეთ ბრძანება 1 ან 2")
def Start_Scan(self, port_start, port_end):
Core.f = open(Core.ipurl, "a")
try:
for x in range(port_start,port_end):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
res = sock.connect_ex((Core.ipurl,x))
if res == 0:
tmp="პორტი",x,"გახსნილია", socket.getservbyport(x)
tmp1=str(tmp[0])+" "+str(tmp[1])+" "+str(tmp[2])+" "+str(tmp[3])
print(bcolors.OKGREEN,tmp1)
Core.f.write(str(tmp)+"\n")
Core.f.close()
except Exception as e:
print (e)
try:
scan = Core()
scan.GetData(input("ჩაწერეთ IP ან მისამართი URL\n"))
print(bcolors.WARNING,"სიხშირე:",Core.mode,"\n სამიზნე:",Core.ipurl,"\n სკანერის სიჩქარე:",Core.network_speed,bcolors.ENDC)
print(bcolors.BOLD,"გთხოვთ დაიცადოთ რამდენიმე წამი...",bcolors.ENDC)
for count in range(0,Core.mode):
time.sleep(Core.network_speed)
_thread.start_new_thread(scan.Start_Scan, (count,count+1))
if count > Core.mode:
exit(0)
except Exception as e:
print (e)
| none | 1 | 2.771498 | 3 |
|
agent/src/clacks/agent/objects/object.py | gonicus/clacks | 2 | 10484 | <filename>agent/src/clacks/agent/objects/object.py
# This file is part of the clacks framework.
#
# http://clacks-project.org
#
# Copyright:
# (C) 2010-2012 GONICUS GmbH, Germany, http://www.gonicus.de
#
# License:
# GPL-2: http://www.gnu.org/licenses/gpl-2.0.html
#
# See the LICENSE file in the project's top-level directory for details.
"""
The object base class.
"""
import copy
import zope.event
import pkg_resources
import os
from lxml import etree
from lxml.builder import E
from logging import getLogger
from zope.interface import Interface, implements
from clacks.common import Environment
from clacks.common.utils import N_, is_uuid
from clacks.common.components import PluginRegistry
from clacks.common.error import ClacksErrorHandler as C
from clacks.agent.objects.backend.registry import ObjectBackendRegistry
from clacks.agent.exceptions import ObjectException
# Status
STATUS_OK = 0
STATUS_CHANGED = 1
# Register the errors handled by us
C.register_codes(dict(
CREATE_NEEDS_BASE=N_("Creation of '%(location)s' lacks a base DN"),
READ_BACKEND_PROPERTIES=N_("Error reading properties for backend '%(backend)s'"),
ATTRIBUTE_BLOCKED_BY=N_("Attribute is blocked by %(source)s==%(value)s"),
ATTRIBUTE_READ_ONLY=N_("Attribute is read only"),
ATTRIBUTE_MANDATORY=N_("Attribute is mandatory"),
ATTRIBUTE_INVALID_CONSTANT=N_("Value is invalid - expected one of %(elements)s"),
ATTRIBUTE_INVALID_LIST=N_("Value is invalid - expected a list"),
ATTRIBUTE_INVALID=N_("Value is invalid - expected value of type '%(type)s'"),
ATTRIBUTE_CHECK_FAILED=N_("Value is invalid"),
ATTRIBUTE_NOT_UNIQUE=N_("Value is not unique (%(value)s)"),
ATTRIBUTE_NOT_FOUND=N_("Attribute not found"),
OBJECT_MODE_NOT_AVAILABLE=N_("Mode '%(mode)s' is not available for base objects"),
OBJECT_MODE_BASE_AVAILABLE=N_("Mode '%(mode)s' is only available for base objects"),
OBJECT_NOT_SUB_FOR=N_("Object of type '%(ext)s' cannot be added as to the '%(base)s' container"),
OBJECT_REMOVE_NON_BASE_OBJECT=N_("Cannot remove non base object"),
OBJECT_MOVE_NON_BASE_OBJECT=N_("Cannot move non base object"),
OBJECT_BASE_NO_RETRACT=N_("Base object cannot be retracted"),
FILTER_INVALID_KEY=N_("Invalid key '%(key)s' for filter '%(filter)s'"),
FILTER_MISSING_KEY=N_("Missing key '%(key)s' after processing filter '%(filter)s'"),
FILTER_NO_LIST=N_("Filter '%(filter)s' did not return a %(type)s value - a list was expected"),
ATTRIBUTE_DEPEND_LOOP=N_("Potential loop in attribute dependencies")
))
class Object(object):
"""
This class is the base class for all objects.
It contains getter and setter methods for the object
attributes and it is able to initialize itself by reading data from
backends.
It also contains the ability to execute the in- and out-filters for the
object properties.
All meta-classes for objects, created by the XML definitions, will inherit this class.
"""
_reg = None
_backend = None
_mode = False
_propsByBackend = {}
uuid = None
dn = None
orig_dn = None
log = None
createTimestamp = None
modifyTimestamp = None
myProperties = None
env = None
parent = None
owner = None
attributesInSaveOrder = None
def __saveOrder(self):
"""
Returns a list containing all attributes in the correct
save-order.
Due to the fact that some attributes depend on another,
we have to save some attributes first and then the others.
"""
data = self.__saveOrderHelper()
attrs = []
for level in sorted(data.keys(), reverse=True):
for attr in data[level]:
if attr not in attrs:
attrs.append(attr)
return attrs
def __saveOrderHelper(self, res=None, item=None, level=0):
"""
Helper method for '__saveOrder' to detect the dependency
depth (level) for an attribute
"""
if not res:
res = {}
if not level in res:
res[level] = []
if level == 10:
raise ValueError(C.make_error('ATTRIBUTE_DEPEND_LOOP'))
if not item:
for key in self.myProperties:
self.__saveOrderHelper(res, key, level + 1)
else:
if len(self.myProperties[item]['depends_on']):
for key in self.myProperties[item]['depends_on']:
self.__saveOrderHelper(res, key, level + 1)
res[level].append(item)
return res
def __init__(self, where=None, mode="update"):
self.env = Environment.getInstance()
# Instantiate Backend-Registry
self._reg = ObjectBackendRegistry.getInstance()
self.log = getLogger(__name__)
self.log.debug("new object instantiated '%s'" % type(self).__name__)
# Group attributes by Backend
propsByBackend = {}
props = getattr(self, '__properties')
self.myProperties = copy.deepcopy(props)
self.attributesInSaveOrder = self.__saveOrder()
atypes = self._objectFactory.getAttributeTypes()
for key in self.myProperties:
# Load dynamic dropdown-values
if self.myProperties[key]['values_populate']:
cr = PluginRegistry.getInstance('CommandRegistry')
values = cr.call(self.myProperties[key]['values_populate'])
if type(values).__name__ == "dict":
self.myProperties[key]['values'] = values
else:
self.myProperties[key]['values'] = atypes['String'].convert_to(self.myProperties[key]['type'], values)
# Initialize an empty array for each backend
for be in self.myProperties[key]['backend']:
if be not in propsByBackend:
propsByBackend[be] = []
# Append property
propsByBackend[be].append(key)
self._propsByBackend = propsByBackend
self._mode = mode
# Initialize object using a DN
if where:
if mode == "create":
if is_uuid(where):
raise ValueError(C.make_error('CREATE_NEEDS_BASE', "base", location=where))
self.orig_dn = self.dn = where
else:
self._read(where)
# Set status to modified for attributes that do not have a value but are
# mandatory and have a default.
# This ensures that default values are passed to the out_filters and get saved
# afterwards.
# (Defaults will be passed to in-filters too, if they are not overwritten by _read())
for key in self.myProperties:
if not(self.myProperties[key]['value']) and self.myProperties[key]['default'] is not None and \
len(self.myProperties[key]['default']):
self.myProperties[key]['value'] = copy.deepcopy(self.myProperties[key]['default'])
if self.myProperties[key]['mandatory']:
self.myProperties[key]['status'] = STATUS_CHANGED
def set_foreign_value(self, attr, original):
self.myProperties[attr]['value'] = original['value']
self.myProperties[attr]['in_value'] = original['in_value']
self.myProperties[attr]['orig_value'] = original['orig_value']
def listProperties(self):
return self.myProperties.keys()
def getProperties(self):
return copy.deepcopy(self.myProperties)
def listMethods(self):
methods = getattr(self, '__methods')
return methods.keys()
def hasattr(self, attr):
return attr in self.myProperties
def _read(self, where):
"""
This method tries to initialize an object instance by reading data
from the defined backend.
Attributes will be grouped by their backend to ensure that only one
request per backend will be performed.
"""
# Generate missing values
if is_uuid(where):
#pylint: disable=E1101
if self._base_object:
self.dn = self._reg.uuid2dn(self._backend, where)
else:
self.dn = None
self.uuid = where
else:
self.dn = where
self.uuid = self._reg.dn2uuid(self._backend, where)
# Get last change timestamp
self.orig_dn = self.dn
if self.dn:
self.createTimestamp, self.modifyTimestamp = self._reg.get_timestamps(self._backend, self.dn)
# Load attributes for each backend.
# And then assign the values to the properties.
self.log.debug("object uuid: %s" % self.uuid)
for backend in self._propsByBackend:
try:
# Create a dictionary with all attributes we want to fetch
# {attribute_name: type, name: type}
info = dict([(k, self.myProperties[k]['backend_type']) for k in self._propsByBackend[backend]])
self.log.debug("loading attributes for backend '%s': %s" % (backend, str(info)))
be = ObjectBackendRegistry.getBackend(backend)
be_attrs = self._backendAttrs[backend] if backend in self._backendAttrs else None
attrs = be.load(self.uuid, info, be_attrs)
except ValueError as e:
raise ObjectException(C.make_error('READ_BACKEND_PROPERTIES', backend=backend))
# Assign fetched value to the properties.
for key in self._propsByBackend[backend]:
if key not in attrs:
self.log.debug("attribute '%s' was not returned by load" % key)
continue
# Keep original values, they may be overwritten in the in-filters.
self.myProperties[key]['in_value'] = self.myProperties[key]['value'] = attrs[key]
self.log.debug("%s: %s" % (key, self.myProperties[key]['value']))
# Once we've loaded all properties from the backend, execute the
# in-filters.
for key in self.myProperties:
# Skip loading in-filters for None values
if self.myProperties[key]['value'] is None:
self.myProperties[key]['in_value'] = self.myProperties[key]['value'] = []
continue
# Execute defined in-filters.
if len(self.myProperties[key]['in_filter']):
self.log.debug("found %s in-filter(s) for attribute '%s'" % (str(len(self.myProperties[key]['in_filter'])), key))
# Execute each in-filter
for in_f in self.myProperties[key]['in_filter']:
self.__processFilter(in_f, key, self.myProperties)
# Convert the received type into the target type if not done already
#pylint: disable=E1101
atypes = self._objectFactory.getAttributeTypes()
for key in self.myProperties:
# Convert values from incoming backend-type to required type
if self.myProperties[key]['value']:
a_type = self.myProperties[key]['type']
be_type = self.myProperties[key]['backend_type']
# Convert all values to required type
if not atypes[a_type].is_valid_value(self.myProperties[key]['value']):
try:
self.myProperties[key]['value'] = atypes[a_type].convert_from(be_type, self.myProperties[key]['value'])
except Exception as e:
self.log.error("conversion of '%s' from '%s' to type '%s' failed: %s" % (key, be_type, a_type, str(e)))
else:
self.log.debug("converted '%s' from type '%s' to type '%s'!" % (key, be_type, a_type))
# Keep the initial value
self.myProperties[key]['last_value'] = self.myProperties[key]['orig_value'] = copy.deepcopy(self.myProperties[key]['value'])
def _delattr_(self, name):
"""
Deleter method for properties.
"""
if name in self.attributesInSaveOrder:
# Check if this attribute is blocked by another attribute and its value.
for bb in self.myProperties[name]['blocked_by']:
if bb['value'] in self.myProperties[bb['name']]['value']:
raise AttributeError(C.make_error(
'ATTRIBUTE_BLOCKED_BY', name,
source=bb['name'], value=bb['value']))
# Do not allow to write to read-only attributes.
if self.myProperties[name]['readonly']:
raise AttributeError(C.make_error('ATTRIBUTE_READ_ONLY', name))
# Do not allow remove mandatory attributes
if self.myProperties[name]['mandatory']:
raise AttributeError(C.make_error('ATTRIBUTE_MANDATORY', name))
# If not already in removed state
if len(self.myProperties[name]['value']) != 0:
self.myProperties[name]['status'] = STATUS_CHANGED
self.myProperties[name]['last_value'] = copy.deepcopy(self.myProperties[name]['value'])
self.myProperties[name]['value'] = []
else:
raise AttributeError(C.make_error('ATTRIBUTE_NOT_FOUND', name))
def _setattr_(self, name, value):
"""
This is the setter method for object attributes.
Each given attribute value is validated with the given set of
validators.
"""
# Store non property values
try:
object.__getattribute__(self, name)
self.__dict__[name] = value
return
except AttributeError:
pass
# A none value was passed to clear the value
if value is None:
self._delattr_(name)
return
# Try to save as property value
if name in self.myProperties:
# Check if this attribute is blocked by another attribute and its value.
for bb in self.myProperties[name]['blocked_by']:
if bb['value'] in self.myProperties[bb['name']]['value']:
raise AttributeError(C.make_error(
'ATTRIBUTE_BLOCKED_BY', name,
source=bb['name'], value=bb['value']))
# Do not allow to write to read-only attributes.
if self.myProperties[name]['readonly']:
raise AttributeError(C.make_error('ATTRIBUTE_READ_ONLY', name))
# Check if the given value has to match one out of a given list.
if len(self.myProperties[name]['values']) and value not in self.myProperties[name]['values']:
raise TypeError(C.make_error(
'ATTRIBUTE_INVALID_CONSTANT', name,
elements=", ".join(self.myProperties[name]['values'])))
# Set the new value
if self.myProperties[name]['multivalue']:
# Check if the new value is a list.
if type(value) != list:
raise TypeError(C.make_error('ATTRIBUTE_INVALID_LIST', name))
new_value = value
else:
new_value = [value]
# Eventually fixup value from incoming JSON string
s_type = self.myProperties[name]['type']
try:
new_value = self._objectFactory.getAttributeTypes()[s_type].fixup(new_value)
except Exception:
raise TypeError(C.make_error('ATTRIBUTE_INVALID', name, type=s_type))
# Check if the new value is valid
#pylint: disable=E1101
if not self._objectFactory.getAttributeTypes()[s_type].is_valid_value(new_value):
raise TypeError(C.make_error('ATTRIBUTE_INVALID', name, type=s_type))
# Validate value
if self.myProperties[name]['validator']:
props_copy = copy.deepcopy(self.myProperties)
res, error = self.__processValidator(self.myProperties[name]['validator'], name, new_value, props_copy)
if not res:
if len(error):
raise ValueError(C.make_error('ATTRIBUTE_CHECK_FAILED',
name, details=error))
else:
raise ValueError(C.make_error('ATTRIBUTE_CHECK_FAILED', name))
# Ensure that unique values stay unique. Let the backend test this.
#if self.myProperties[name]['unique']:
# backendI = ObjectBackendRegistry.getBackend(self.myProperties[name]['backend'])
# if not backendI.is_uniq(name, new_value):
# raise ObjectException(C.make_error('ATTRIBUTE_NOT_UNIQUE', name, value=value))
# Assign the properties new value.
self.myProperties[name]['value'] = new_value
self.log.debug("updated property value of [%s|%s] %s:%s" % (type(self).__name__, self.uuid, name, new_value))
# Update status if there's a change
t = self.myProperties[name]['type']
current = copy.deepcopy(self.myProperties[name]['value'])
#pylint: disable=E1101
if not self._objectFactory.getAttributeTypes()[t].values_match(self.myProperties[name]['value'], self.myProperties[name]['orig_value']):
self.myProperties[name]['status'] = STATUS_CHANGED
self.myProperties[name]['last_value'] = current
else:
raise AttributeError(C.make_error('ATTRIBUTE_NOT_FOUND', name))
def _getattr_(self, name):
"""
The getter method for object attributes.
(It differentiates between object attributes and class-members)
"""
methods = getattr(self, '__methods')
# If the requested property exists in the object-attributes, then return it.
if name in self.myProperties:
# We can have single and multivalues, return the correct type here.
value = None
if self.myProperties[name]['multivalue']:
value = self.myProperties[name]['value']
else:
if len(self.myProperties[name]['value']):
value = self.myProperties[name]['value'][0]
return value
# The requested property-name seems to be a method, return the method reference.
elif name in methods:
def m_call(*args, **kwargs):
return methods[name]['ref'](self, *args, **kwargs)
return m_call
else:
raise AttributeError(C.make_error('ATTRIBUTE_NOT_FOUND', name))
def getTemplate(self, theme="default"):
"""
Return the template data - if any. Else None.
"""
return Object.getNamedTemplate(self.env, self._templates, theme)
@staticmethod
def getNamedTemplate(env, templates, theme="default"):
"""
Return the template data - if any. Else None.
"""
ui = []
# If there's a template file, try to find it
if templates:
for template in templates:
path = None
# Absolute path
if template.startswith(os.path.sep):
path = template
# Relative path
else:
# Find path
path = pkg_resources.resource_filename('clacks.agent', os.path.join('data', 'templates', theme, template)) #@UndefinedVariable
if not os.path.exists(path):
path = os.path.join(env.config.getBaseDir(), 'templates', theme, template)
if not os.path.exists(path):
path = pkg_resources.resource_filename('clacks.agent', os.path.join('data', 'templates', "default", template)) #@UndefinedVariable
if not os.path.exists(path):
path = os.path.join(env.config.getBaseDir(), 'templates', "default", template)
if not os.path.exists(path):
return None
with open(path, "r") as f:
_ui = f.read()
# Build new merged resource element
root = etree.fromstring(_ui)
new_resources = []
resources = root.find("resources")
for include in resources.findall("include"):
rc = include.get("location")
location = os.path.join(os.path.dirname(path), rc)
if not os.path.exists(location):
raise IOError(C.make_error("NO_SUCH_RESOURCE", resource=location))
res = ""
with open(location, "r") as f:
res = f.read()
for resource in etree.fromstring(res).findall("qresource"):
files = []
prefix = resource.get("prefix")
for f in resource.findall("file"):
files.append(E.file(os.path.join(prefix, unicode(f.text))))
new_resources.append(E.resource(*files, location=rc))
root.replace(root.find("resources"), E.resources(*new_resources))
ui.append(etree.tostring(root))
return ui
def getAttrType(self, name):
"""
Return the type of a given object attribute.
"""
if name in self.myProperties:
return self.myProperties[name]['type']
raise AttributeError(C.make_error('ATTRIBUTE_NOT_FOUND', name))
def check(self, propsFromOtherExtensions=None):
"""
Checks whether everything is fine with the extension and its given values or not.
"""
if not propsFromOtherExtensions:
propsFromOtherExtensions = {}
# Create a copy to avoid touching the original values
props = copy.deepcopy(self.myProperties)
# Check if _mode matches with the current object type
#pylint: disable=E1101
if self._base_object and not self._mode in ['create', 'remove', 'update']:
raise ObjectException(C.make_error('OBJECT_MODE_NOT_AVAILABLE', mode=self._mode))
if not self._base_object and self._mode in ['create', 'remove']:
raise ObjectException(C.make_error('OBJECT_MODE_BASE_AVAILABLE', mode=self._mode))
# Check if we are allowed to create this base object on the given base
if self._base_object and self._mode == "create":
base_type = self.get_object_type_by_dn(self.dn)
if not base_type:
raise ObjectException(C.make_error('OBJECT_MODE_BASE_AVAILABLE', mode=self._mode))
if self.__class__.__name__ not in self._objectFactory.getAllowedSubElementsForObject(base_type):
raise ObjectException(C.make_error('OBJECT_NOT_SUB_FOR',
ext=self.__class__.__name__,
base=base_type))
# Transfer values from other commit processes into ourselves
for key in self.attributesInSaveOrder:
if props[key]['foreign'] and key in propsFromOtherExtensions:
props[key]['value'] = propsFromOtherExtensions[key]['value']
# Transfer status into commit status
props[key]['commit_status'] = props[key]['status']
# Collect values by store and process the property filters
for key in self.attributesInSaveOrder:
# Skip foreign properties
if props[key]['foreign']:
continue
# Check if this attribute is blocked by another attribute and its value.
is_blocked = False
for bb in props[key]['blocked_by']:
if bb['value'] in props[bb['name']]['value']:
is_blocked = True
break
# Check if all required attributes are set. (Skip blocked once, they cannot be set!)
if not is_blocked and props[key]['mandatory'] and not len(props[key]['value']):
raise ObjectException(C.make_error('ATTRIBUTE_MANDATORY', key))
# Process each and every out-filter with a clean set of input values,
# to avoid that return-values overwrite themselves.
if len(props[key]['out_filter']):
self.log.debug(" found %s out-filter for %s" % (str(len(props[key]['out_filter'])), key,))
for out_f in props[key]['out_filter']:
self.__processFilter(out_f, key, props)
# Collect properties by backend
for prop_key in self.attributesInSaveOrder:
# Skip foreign properties
if props[prop_key]['foreign']:
continue
# Ensure that mandatory values are set
if props[prop_key]['mandatory'] and not len(props[prop_key]['value']):
raise ObjectException(C.make_error('ATTRIBUTE_MANDATORY', prop_key))
# Do not save untouched values
if not props[prop_key]['commit_status'] & STATUS_CHANGED:
continue
return props
def commit(self, propsFromOtherExtensions=None):
"""
Commits changes of an object to the corresponding backends.
"""
if not propsFromOtherExtensions:
propsFromOtherExtensions = {}
self.check(propsFromOtherExtensions)
self.log.debug("saving object modifications for [%s|%s]" % (type(self).__name__, self.uuid))
# Create a copy to avoid touching the original values
props = copy.deepcopy(self.myProperties)
# Transfer status into commit status
for key in self.attributesInSaveOrder:
props[key]['commit_status'] = props[key]['status']
# Transfer values form other commit processes into ourselfes
if props[key]['foreign'] and key in propsFromOtherExtensions:
props[key]['value'] = propsFromOtherExtensions[key]['value']
# Adapt property states
# Run this once - If any state was adapted, then run again to ensure
# that all dependencies are processed.
first = True
_max = 5
required = False
while (first or required) and _max:
first = False
required = False
_max -= 1
for key in self.attributesInSaveOrder:
# Adapt status from dependent properties.
for propname in props[key]['depends_on']:
old = props[key]['commit_status']
props[key]['commit_status'] |= props[propname]['status'] & STATUS_CHANGED
props[key]['commit_status'] |= props[propname]['commit_status'] & STATUS_CHANGED
if props[key]['commit_status'] != old:
required = True
# Collect values by store and process the property filters
collectedAttrs = {}
for key in self.attributesInSaveOrder:
# Skip foreign properties
if props[key]['foreign']:
continue
# Do not save untouched values
if not props[key]['commit_status'] & STATUS_CHANGED:
continue
# Get the new value for the property and execute the out-filter
self.log.debug("changed: %s" % (key,))
# Process each and every out-filter with a clean set of input values,
# to avoid that return-values overwrite themselves.
if len(props[key]['out_filter']):
self.log.debug(" found %s out-filter for %s" % (str(len(props[key]['out_filter'])), key,))
for out_f in props[key]['out_filter']:
self.__processFilter(out_f, key, props)
# Collect properties by backend
for prop_key in self.attributesInSaveOrder:
# Skip foreign properties
if props[prop_key]['foreign']:
continue
# Do not save untouched values
if not props[prop_key]['commit_status'] & STATUS_CHANGED:
continue
collectedAttrs[prop_key] = props[prop_key]
# Create a backend compatible list of all changed attributes.
toStore = {}
for prop_key in collectedAttrs:
# Collect properties by backend
for be in props[prop_key]['backend']:
if not be in toStore:
toStore[be] = {}
# Convert the properities type to the required format - if its not of the expected type.
be_type = collectedAttrs[prop_key]['backend_type']
s_type = collectedAttrs[prop_key]['type']
if not self._objectFactory.getAttributeTypes()[be_type].is_valid_value(collectedAttrs[prop_key]['value']):
collectedAttrs[prop_key]['value'] = self._objectFactory.getAttributeTypes()[s_type].convert_to(
be_type, collectedAttrs[prop_key]['value'])
# Append entry to the to-be-stored list
toStore[be][prop_key] = {'foreign': collectedAttrs[prop_key]['foreign'],
'orig': collectedAttrs[prop_key]['in_value'],
'value': collectedAttrs[prop_key]['value'],
'type': collectedAttrs[prop_key]['backend_type']}
# We may have a plugin without any attributes, like the group asterisk extension, in
# this case we've to update the object despite of the lack of properties.
if not len(toStore) and self._backend:
toStore[self._backend] = {}
# Leave the show if there's nothing to do
tmp = {}
for key, value in toStore.items():
# Skip NULL backend. Nothing to save, anyway.
if key == "NULL":
continue
tmp[key] = value
toStore = tmp
# Skip the whole process if there's no change at all
if not toStore:
return {}
# Update references using the toStore information
changes = {}
for be in toStore:
changes.update(toStore[be])
self.update_refs(changes)
# Handle by backend
p_backend = getattr(self, '_backend')
obj = self
zope.event.notify(ObjectChanged("pre %s" % self._mode, obj))
# Call pre-hooks now
if self._mode in ["extend", "create"]:
self.__execute_hook("PreCreate")
if self._mode in ["update"]:
self.__execute_hook("PreModify")
# First, take care about the primary backend...
if p_backend in toStore:
beAttrs = self._backendAttrs[p_backend] if p_backend in self._backendAttrs else {}
be = ObjectBackendRegistry.getBackend(p_backend)
if self._mode == "create":
obj.uuid = be.create(self.dn, toStore[p_backend], self._backendAttrs[p_backend])
elif self._mode == "extend":
be.extend(self.uuid, toStore[p_backend],
self._backendAttrs[p_backend],
self.getForeignProperties())
else:
be.update(self.uuid, toStore[p_backend], beAttrs)
# Eventually the DN has changed
if self._base_object:
dn = be.uuid2dn(self.uuid)
# Take DN for newly created objects
if self._mode == "create":
if self._base_object:
obj.dn = dn
elif dn != obj.dn:
self.update_dn_refs(dn)
obj.dn = dn
if self._base_object:
zope.event.notify(ObjectChanged("post move", obj))
obj.orig_dn = dn
# ... then walk thru the remaining ones
for backend, data in toStore.items():
# Skip primary backend - already done
if backend == p_backend:
continue
be = ObjectBackendRegistry.getBackend(backend)
beAttrs = self._backendAttrs[backend] if backend in self._backendAttrs else {}
if self._mode == "create":
be.create(self.dn, data, beAttrs)
elif self._mode == "extend":
be.extend(self.uuid, data, beAttrs, self.getForeignProperties())
else:
be.update(self.uuid, data, beAttrs)
zope.event.notify(ObjectChanged("post %s" % self._mode, obj))
# Call post-hooks now
if self._mode in ["extend", "create"]:
self.__execute_hook("PostCreate")
if self._mode in ["update"] and "PostModify":
self.__execute_hook("PostModify")
return props
def revert(self):
"""
Reverts all changes made to this object since it was loaded.
"""
for key in self.myProperties:
self.myProperties[key]['value'] = self.myProperties[key]['last_value']
self.log.debug("reverted object modifications for [%s|%s]" % (type(self).__name__, self.uuid))
def getExclusiveProperties(self):
return [x for x, y in self.myProperties.items() if not y['foreign']]
def getForeignProperties(self):
return [x for x, y in self.myProperties.items() if y['foreign']]
def __processValidator(self, fltr, key, value, props_copy):
"""
This method processes a given process-list (fltr) for a given property (prop).
And return TRUE if the value matches the validator set and FALSE if
not.
"""
# This is our process-line pointer it points to the process-list line
# we're executing at the moment
lptr = 0
# Our filter result stack
stack = list()
self.log.debug(" validator started (%s)" % key)
self.log.debug(" value: %s" % (value, ))
# Process the list till we reach the end..
lasterrmsg = ""
errormsgs = []
while (lptr + 1) in fltr:
# Get the current line and increase the process list pointer.
lptr += 1
curline = fltr[lptr]
# A condition matches for something and returns a boolean value.
# We'll put this value on the stack for later use.
if 'condition' in curline:
# Build up argument list
args = [props_copy, key, value] + curline['params']
# Process condition and keep results
fname = type(curline['condition']).__name__
v, errors = (curline['condition']).process(*args)
# Log what happend!
self.log.debug(" %s: [Filter] %s(%s) called and returned: %s" % (
lptr, fname, ", ".join(["\"" + x + "\"" for x in curline['params']]), v))
# Append the result to the stack.
stack.append(v)
if not v:
if len(errors):
lasterrmsg = errors.pop()
# A comparator compares two values from the stack and then returns a single
# boolean value.
elif 'operator' in curline:
v1 = stack.pop()
v2 = stack.pop()
fname = type(curline['operator']).__name__
res = (curline['operator']).process(v1, v2)
stack.append(res)
# Add last error message
if not res:
errormsgs.append(lasterrmsg)
lasterrmsg = ""
# Log what happend!
self.log.debug(" %s: [OPERATOR] %s(%s, %s) called and returned: %s" % (
lptr, fname, v1, v2, res))
# Attach last error message
res = stack.pop()
if not res and lasterrmsg != "":
errormsgs.append(lasterrmsg)
self.log.debug(" <- VALIDATOR ENDED (%s)" % key)
return res, errormsgs
def __processFilter(self, fltr, key, prop):
"""
This method processes a given process-list (fltr) for a given property (prop).
For example: When a property has to be stored in the backend, it will
run through the out-filter-process-list and thus will be transformed into a storable
key, value pair.
"""
# Search for replaceable patterns in the process-list.
fltr = self.__fillInPlaceholders(fltr, prop)
# This is our process-line pointer it points to the process-list line
# we're executing at the moment
lptr = 0
# Our filter result stack
stack = list()
# Log values
self.log.debug(" -> FILTER STARTED (%s)" % key)
# Process the list till we reach the end..
while (lptr + 1) in fltr:
# Get the current line and increase the process list pointer.
lptr += 1
curline = fltr[lptr]
# A filter is used to manipulate the 'value' or the 'key' or maybe both.
if 'filter' in curline:
# Build up argument list
args = [self, key, prop]
fname = type(curline['filter']).__name__
for entry in curline['params']:
args.append(entry)
# Process filter and keep results
key, prop = (curline['filter']).process(*args)
# Ensure that the processed data is still valid.
# Filter may mess things up and then the next cannot process correctly.
if key not in prop:
raise ObjectException(C.make_error('FILTER_INVALID_KEY',
key=key, filter=fname))
# Check if the filter returned all expected property values.
for pk in prop:
if not all(k in prop[pk] for k in ('backend', 'value', 'type')):
missing = ", ".join({'backend', 'value', 'type'} - set(prop[pk].keys()))
raise ObjectException(C.make_error('FILTER_MISSING_KEY', key=missing, filter=fname))
# Check if the returned value-type is list or None.
if type(prop[pk]['value']) not in [list, type(None)]:
raise ObjectException(C.make_error('FILTER_NO_LIST',
key=pk, filter=fname, type=type(prop[pk]['value'])))
self.log.debug(" %s: [Filter] %s(%s) called " % (lptr, fname,
", ".join(["\"" + x + "\"" for x in curline['params']])))
# A condition matches for something and returns a boolean value.
# We'll put this value on the stack for later use.
elif 'condition' in curline:
# Build up argument list
args = [key] + curline['params']
# Process condition and keep results
stack.append((curline['condition']).process(*args))
fname = type(curline['condition']).__name__
self.log.debug(" %s: [Condition] %s(%s) called " % (lptr, fname, ", ".join(curline['params'])))
# Handle jump, for example if a condition has failed, jump over its filter-chain.
elif 'jump' in curline:
# Jump to <line> -1 because we will increase the line ptr later.
olptr = lptr
if curline['jump'] == 'conditional':
if stack.pop():
lptr = curline['onTrue'] - 1
else:
lptr = curline['onFalse'] - 1
else:
lptr = curline['to'] - 1
self.log.debug(" %s: [Goto] %s ()" % (olptr, lptr))
# A comparator compares two values from the stack and then returns a single
# boolean value.
elif 'operator' in curline:
a = stack.pop()
b = stack.pop()
stack.append((curline['operator']).process(a, b))
fname = type(curline['operator']).__name__
self.log.debug(" %s: [Condition] %s(%s, %s) called " % (lptr, fname, a, b))
# Log current values
#self.log.debug(" result")
#for pkey in prop:
# self.log.debug(" %s: %s" % (pkey, prop[pkey]['value']))
self.log.debug(" <- FILTER ENDED")
return prop
def __fillInPlaceholders(self, fltr, props):
"""
This method fill in placeholder into in- and out-filters.
"""
# Collect all property values
propList = {}
for key in props:
if props[key]['multivalue']:
propList[key] = props[key]['value']
else:
if props[key]['value'] and len(props[key]['value']):
propList[key] = props[key]['value'][0]
else:
propList[key] = None
# An inline function which replaces format string tokens
def _placeHolder(x):
try:
x = x % propList
except KeyError:
pass
return x
# Walk trough each line of the process list an replace placeholders.
for line in fltr:
if 'params' in fltr[line]:
fltr[line]['params'] = map(_placeHolder,
fltr[line]['params'])
return fltr
def get_object_type_by_dn(self, dn):
"""
Returns the objectType for a given DN
"""
index = PluginRegistry.getInstance("ObjectIndex")
res = index.search({'dn': dn}, {'_type': 1})
return res[0]['_type'] if res.count() == 1 else None
def get_references(self, override=None):
res = []
index = PluginRegistry.getInstance("ObjectIndex")
for ref, info in self._objectFactory.getReferences(override or self.__class__.__name__).items():
for ref_attribute, dsc in info.items():
for idsc in dsc:
if self.myProperties[idsc[1]]['orig_value'] and len(self.myProperties[idsc[1]]['orig_value']):
oval = self.myProperties[idsc[1]]['orig_value'][0]
else:
oval = None
dns = index.search({'_type': ref, ref_attribute: oval}, {'dn': 1})
if dns.count():
dns = [x['dn'] for x in dns]
res.append((
ref_attribute,
idsc[1],
getattr(self, idsc[1]),
dns or [],
self.myProperties[idsc[1]]['multivalue']))
return res
def update_refs(self, data):
for ref_attr, self_attr, value, refs, multivalue in self.get_references(): #@UnusedVariable
for ref in refs:
# Next iterration if there's no change for the relevant
# attribute
if not self_attr in data:
continue
# Load object and change value to the new one
c_obj = ObjectProxy(ref)
c_value = getattr(c_obj, ref_attr)
o_value = data[self_attr]['orig']
if type(c_value) == list:
if type(o_value) == list:
c_value = filter(lambda x: x not in o_value, c_value)
else:
c_value = filter(lambda x: x != o_value, c_value)
if multivalue:
c_value.append(data[self_attr]['value'])
else:
c_value.append(data[self_attr]['value'][0])
setattr(c_obj, ref_attr, list(set(c_value)))
else:
setattr(c_obj, ref_attr, data[self_attr]['value'][0])
c_obj.commit()
def remove_refs(self):
for ref_attr, self_attr, value, refs, multivalue in self.get_references(): #@UnusedVariable
for ref in refs:
c_obj = ObjectProxy(ref)
c_value = getattr(c_obj, ref_attr)
if type(c_value) == list:
if type(value) == list:
c_value = filter(lambda x: x not in value, c_value)
else:
c_value = filter(lambda x: x != value, c_value)
setattr(c_obj, ref_attr, c_value)
else:
setattr(c_obj, ref_attr, None)
c_obj.commit()
def get_dn_references(self):
res = []
index = PluginRegistry.getInstance("ObjectIndex")
for info in self._objectFactory.getReferences("*", "dn").values():
for ref_attribute in info.keys():
dns = index.search({ref_attribute: self.dn}, {'dn': 1})
if dns.count():
dns = [x['dn'] for x in dns]
res.append((
ref_attribute,
map(lambda s: s.decode('utf-8'), dns if dns else [])
))
return res
def update_dn_refs(self, new_dn):
for ref_attr, refs in self.get_dn_references():
for ref in refs:
c_obj = ObjectProxy(ref)
c_value = getattr(c_obj, ref_attr)
if type(c_value) == list:
c_value = filter(lambda x: x != self.dn, c_value)
c_value.append(new_dn)
setattr(c_obj, ref_attr, list(set(c_value)))
else:
setattr(c_obj, ref_attr, new_dn)
c_obj.commit()
def remove_dn_refs(self):
for ref_attr, refs in self.get_dn_references():
for ref in refs:
c_obj = ObjectProxy(ref)
c_value = getattr(c_obj, ref_attr)
if type(c_value) == list:
c_value = filter(lambda x: x != self.dn, c_value)
setattr(c_obj, ref_attr, list(set(c_value)))
else:
setattr(c_obj, ref_attr, None)
c_obj.commit()
def remove(self):
"""
Removes this object - and eventually it's containements.
"""
#pylint: disable=E1101
if not self._base_object:
raise ObjectException(C.make_error('OBJECT_REMOVE_NON_BASE_OBJECT'))
# Remove all references to ourselves
self.remove_refs()
# Collect backends
backends = [getattr(self, '_backend')]
be_attrs = {getattr(self, '_backend'): {}}
for prop, info in self.myProperties.items():
for backend in info['backend']:
if not backend in backends:
backends.append(backend)
if not backend in be_attrs:
be_attrs[backend] = {}
if self.is_attr_set(prop):
be_attrs[backend][prop] = {'foreign': info['foreign'],
'orig': info['in_value'],
'value': info['value'],
'type': info['backend_type']}
# Remove for all backends, removing the primary one as the last one
backends.reverse()
obj = self
zope.event.notify(ObjectChanged("pre remove", obj))
# Call pre-remove now
self.__execute_hook("PreRemove")
for backend in backends:
be = ObjectBackendRegistry.getBackend(backend)
r_attrs = self.getExclusiveProperties()
# Remove all non exclusive properties
remove_attrs = {}
for attr in be_attrs[backend]:
if attr in r_attrs:
remove_attrs[attr] = be_attrs[backend][attr]
self.remove_refs()
self.remove_dn_refs()
#pylint: disable=E1101
be.remove(self.uuid, remove_attrs, self._backendAttrs[backend] \
if backend in self._backendAttrs else None)
zope.event.notify(ObjectChanged("post remove", obj))
# Call post-remove now
self.__execute_hook("PostRemove")
def simulate_move(self, orig_dn):
"""
Simulate a moves for this object
"""
#pylint: disable=E1101
if not self._base_object:
raise ObjectException(C.make_error('OBJECT_MOVE_NON_BASE_OBJECT'))
obj = self
zope.event.notify(ObjectChanged("pre move", obj, dn=self.dn, orig_dn=orig_dn))
# Update the DN refs which have most probably changed
self.update_dn_refs(self.dn)
zope.event.notify(ObjectChanged("post move", obj, dn=self.dn, orig_dn=orig_dn))
def move(self, new_base):
"""
Moves this object - and eventually it's containements.
"""
#pylint: disable=E1101
if not self._base_object:
raise ObjectException(C.make_error('OBJECT_MOVE_NON_BASE_OBJECT'))
# Collect backends
backends = [getattr(self, '_backend')]
# Collect all other backends
for info in self.myProperties.values():
for be in info['backend']:
if not be in backends:
backends.append(be)
obj = self
zope.event.notify(ObjectChanged("pre move", obj))
# Move for primary backend
be = ObjectBackendRegistry.getBackend(backends[0])
be.move(self.uuid, new_base)
# Update the DN refs which have most probably changed
p_backend = getattr(self, '_backend')
be = ObjectBackendRegistry.getBackend(p_backend)
dn = be.uuid2dn(self.uuid)
self.update_dn_refs(dn)
zope.event.notify(ObjectChanged("post move", obj, dn=dn))
def retract(self):
"""
Removes this object extension
"""
#pylint: disable=E1101
if self._base_object:
raise ObjectException(C.make_error('OBJECT_BASE_NO_RETRACT'))
# Call pre-remove now
self.__execute_hook("PreRemove")
# Remove all references to ourselves
self.remove_refs()
# Collect backends
backends = [getattr(self, '_backend')]
be_attrs = {getattr(self, '_backend'): {}}
for prop, info in self.myProperties.items():
for backend in info['backend']:
if not backend in backends:
backends.append(backend)
if not backend in be_attrs:
be_attrs[backend] = {}
if self.is_attr_set(prop):
be_attrs[backend][prop] = {'foreign': info['foreign'],
'orig': info['in_value'],
'value': info['value'],
'type': info['backend_type']}
# Retract for all backends, removing the primary one as the last one
backends.reverse()
obj = self
zope.event.notify(ObjectChanged("pre retract", obj))
for backend in backends:
be = ObjectBackendRegistry.getBackend(backend)
r_attrs = self.getExclusiveProperties()
# Remove all non exclusive properties
remove_attrs = {}
for attr in be_attrs[backend]:
if attr in r_attrs:
remove_attrs[attr] = be_attrs[backend][attr]
self.remove_refs()
self.remove_dn_refs()
#pylint: disable=E1101
be.retract(self.uuid, remove_attrs, self._backendAttrs[backend] \
if backend in self._backendAttrs else None)
zope.event.notify(ObjectChanged("post retract", obj))
# Call post-remove now
self.__execute_hook("PostRemove")
def is_attr_set(self, name):
return len(self.myProperties[name]['in_value'])
def is_attr_using_default(self, name):
return not self.is_attr_set(name) and self.myProperties[name]['default']
def __execute_hook(self, hook_type):
# Execute all hooks registered for the given type
hooks = getattr(self, '__hooks')
if hook_type in hooks:
for hook in hooks[hook_type]:
hook["ref"](self)
class IObjectChanged(Interface):
def __init__(self, obj):
pass
class IAttributeChanged(Interface):
def __init__(self, attr, value):
pass
class ObjectChanged(object):
implements(IObjectChanged)
def __init__(self, reason, obj=None, dn=None, uuid=None, orig_dn=None, o_type=None):
self.reason = reason
self.uuid = uuid or obj.uuid
self.dn = dn or obj.dn
self.orig_dn = orig_dn or obj.orig_dn
self.o_type = o_type or obj.__class__.__name__
class AttributeChanged(object):
implements(IAttributeChanged)
def __init__(self, reason, obj, target):
self.reason = reason
self.target = target
self.uuid = obj.uuid
from clacks.agent.objects.proxy import ObjectProxy
| <filename>agent/src/clacks/agent/objects/object.py
# This file is part of the clacks framework.
#
# http://clacks-project.org
#
# Copyright:
# (C) 2010-2012 GONICUS GmbH, Germany, http://www.gonicus.de
#
# License:
# GPL-2: http://www.gnu.org/licenses/gpl-2.0.html
#
# See the LICENSE file in the project's top-level directory for details.
"""
The object base class.
"""
import copy
import zope.event
import pkg_resources
import os
from lxml import etree
from lxml.builder import E
from logging import getLogger
from zope.interface import Interface, implements
from clacks.common import Environment
from clacks.common.utils import N_, is_uuid
from clacks.common.components import PluginRegistry
from clacks.common.error import ClacksErrorHandler as C
from clacks.agent.objects.backend.registry import ObjectBackendRegistry
from clacks.agent.exceptions import ObjectException
# Status
STATUS_OK = 0
STATUS_CHANGED = 1
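# Illustrative note (not part of the original module): the status values above
# are used as bit flags, so a property's change state is tested with a bitwise
# AND rather than with equality, e.g.:
#
#     if props[key]['commit_status'] & STATUS_CHANGED:
#         ...  # the property was modified and has to be written to its backend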
# Register the errors handled by us
C.register_codes(dict(
CREATE_NEEDS_BASE=N_("Creation of '%(location)s' lacks a base DN"),
READ_BACKEND_PROPERTIES=N_("Error reading properties for backend '%(backend)s'"),
ATTRIBUTE_BLOCKED_BY=N_("Attribute is blocked by %(source)s==%(value)s"),
ATTRIBUTE_READ_ONLY=N_("Attribute is read only"),
ATTRIBUTE_MANDATORY=N_("Attribute is mandatory"),
ATTRIBUTE_INVALID_CONSTANT=N_("Value is invalid - expected one of %(elements)s"),
ATTRIBUTE_INVALID_LIST=N_("Value is invalid - expected a list"),
ATTRIBUTE_INVALID=N_("Value is invalid - expected value of type '%(type)s'"),
ATTRIBUTE_CHECK_FAILED=N_("Value is invalid"),
ATTRIBUTE_NOT_UNIQUE=N_("Value is not unique (%(value)s)"),
ATTRIBUTE_NOT_FOUND=N_("Attribute not found"),
OBJECT_MODE_NOT_AVAILABLE=N_("Mode '%(mode)s' is not available for base objects"),
OBJECT_MODE_BASE_AVAILABLE=N_("Mode '%(mode)s' is only available for base objects"),
OBJECT_NOT_SUB_FOR=N_("Object of type '%(ext)s' cannot be added to the '%(base)s' container"),
OBJECT_REMOVE_NON_BASE_OBJECT=N_("Cannot remove non base object"),
OBJECT_MOVE_NON_BASE_OBJECT=N_("Cannot move non base object"),
OBJECT_BASE_NO_RETRACT=N_("Base object cannot be retracted"),
FILTER_INVALID_KEY=N_("Invalid key '%(key)s' for filter '%(filter)s'"),
FILTER_MISSING_KEY=N_("Missing key '%(key)s' after processing filter '%(filter)s'"),
FILTER_NO_LIST=N_("Filter '%(filter)s' returned a %(type)s value - a list was expected"),
ATTRIBUTE_DEPEND_LOOP=N_("Potential loop in attribute dependencies")
))
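# Illustrative sketch (not part of the original module): the codes registered
# above are turned into exceptions via the shared error handler, roughly like
#
#     raise ObjectException(C.make_error('ATTRIBUTE_MANDATORY', 'uid'))
#     raise ObjectException(C.make_error('OBJECT_NOT_SUB_FOR',
#                                        ext='PosixUser', base='Domain'))
#
# The attribute name 'uid' and the types 'PosixUser'/'Domain' are made-up
# examples; the keyword arguments fill the %(...)s placeholders of the
# registered message templates.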
class Object(object):
"""
This class is the base class for all objects.
It contains getter and setter methods for the object
attributes and it is able to initialize itself by reading data from
backends.
It also contains the ability to execute the in- and out-filters for the
object properties.
All meta-classes for objects, created by the XML definitions, will inherit this class.
"""
_reg = None
_backend = None
_mode = False
_propsByBackend = {}
uuid = None
dn = None
orig_dn = None
log = None
createTimestamp = None
modifyTimestamp = None
myProperties = None
env = None
parent = None
owner = None
attributesInSaveOrder = None
def __saveOrder(self):
"""
Returns a list containing all attributes in the correct
save-order.
Because some attributes depend on others, the attributes have to be
saved in dependency order.
"""
data = self.__saveOrderHelper()
attrs = []
for level in sorted(data.keys(), reverse=True):
for attr in data[level]:
if attr not in attrs:
attrs.append(attr)
return attrs
def __saveOrderHelper(self, res=None, item=None, level=0):
"""
Helper method for '__saveOrder' to detect the dependency
depth (level) for an attribute
"""
if not res:
res = {}
if not level in res:
res[level] = []
if level == 10:
raise ValueError(C.make_error('ATTRIBUTE_DEPEND_LOOP'))
if not item:
for key in self.myProperties:
self.__saveOrderHelper(res, key, level + 1)
else:
if len(self.myProperties[item]['depends_on']):
for key in self.myProperties[item]['depends_on']:
self.__saveOrderHelper(res, key, level + 1)
res[level].append(item)
return res
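# Illustrative example (not part of the original module) of the save-order
# computation above. Assuming two hypothetical properties where 'gecos'
# depends on 'sn' and 'sn' depends on nothing, the helper roughly yields
#
#     __saveOrderHelper()  ->  {0: [], 1: ['gecos', 'sn'], 2: ['sn']}
#     __saveOrder()        ->  ['sn', 'gecos']
#
# (order within one level follows dict iteration order). Deeper dependency
# levels are emitted first, so 'sn' is saved before the attribute depending
# on it; chains nested about ten levels deep raise ATTRIBUTE_DEPEND_LOOP.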
def __init__(self, where=None, mode="update"):
self.env = Environment.getInstance()
# Instantiate Backend-Registry
self._reg = ObjectBackendRegistry.getInstance()
self.log = getLogger(__name__)
self.log.debug("new object instantiated '%s'" % type(self).__name__)
# Group attributes by Backend
propsByBackend = {}
props = getattr(self, '__properties')
self.myProperties = copy.deepcopy(props)
self.attributesInSaveOrder = self.__saveOrder()
atypes = self._objectFactory.getAttributeTypes()
for key in self.myProperties:
# Load dynamic dropdown-values
if self.myProperties[key]['values_populate']:
cr = PluginRegistry.getInstance('CommandRegistry')
values = cr.call(self.myProperties[key]['values_populate'])
if type(values).__name__ == "dict":
self.myProperties[key]['values'] = values
else:
self.myProperties[key]['values'] = atypes['String'].convert_to(self.myProperties[key]['type'], values)
# Initialize an empty array for each backend
for be in self.myProperties[key]['backend']:
if be not in propsByBackend:
propsByBackend[be] = []
# Append property
propsByBackend[be].append(key)
self._propsByBackend = propsByBackend
self._mode = mode
# Initialize object using a DN
if where:
if mode == "create":
if is_uuid(where):
raise ValueError(C.make_error('CREATE_NEEDS_BASE', "base", location=where))
self.orig_dn = self.dn = where
else:
self._read(where)
# Set status to modified for attributes that do not have a value but are
# mandatory and have a default.
# This ensures that default values are passed to the out_filters and get saved
# afterwards.
# (Defaults will be passed to in-filters too, if they are not overwritten by _read())
for key in self.myProperties:
if not(self.myProperties[key]['value']) and self.myProperties[key]['default'] is not None and \
len(self.myProperties[key]['default']):
self.myProperties[key]['value'] = copy.deepcopy(self.myProperties[key]['default'])
if self.myProperties[key]['mandatory']:
self.myProperties[key]['status'] = STATUS_CHANGED
def set_foreign_value(self, attr, original):
self.myProperties[attr]['value'] = original['value']
self.myProperties[attr]['in_value'] = original['in_value']
self.myProperties[attr]['orig_value'] = original['orig_value']
def listProperties(self):
return self.myProperties.keys()
def getProperties(self):
return copy.deepcopy(self.myProperties)
def listMethods(self):
methods = getattr(self, '__methods')
return methods.keys()
def hasattr(self, attr):
return attr in self.myProperties
def _read(self, where):
"""
This method tries to initialize an object instance by reading data
from the defined backend.
Attributes will be grouped by their backend to ensure that only one
request per backend will be performed.
"""
# Generate missing values
if is_uuid(where):
#pylint: disable=E1101
if self._base_object:
self.dn = self._reg.uuid2dn(self._backend, where)
else:
self.dn = None
self.uuid = where
else:
self.dn = where
self.uuid = self._reg.dn2uuid(self._backend, where)
# Get last change timestamp
self.orig_dn = self.dn
if self.dn:
self.createTimestamp, self.modifyTimestamp = self._reg.get_timestamps(self._backend, self.dn)
# Load attributes for each backend and assign the values to the properties.
self.log.debug("object uuid: %s" % self.uuid)
for backend in self._propsByBackend:
try:
# Create a dictionary with all attributes we want to fetch
# {attribute_name: type, name: type}
info = dict([(k, self.myProperties[k]['backend_type']) for k in self._propsByBackend[backend]])
self.log.debug("loading attributes for backend '%s': %s" % (backend, str(info)))
be = ObjectBackendRegistry.getBackend(backend)
be_attrs = self._backendAttrs[backend] if backend in self._backendAttrs else None
attrs = be.load(self.uuid, info, be_attrs)
except ValueError as e:
raise ObjectException(C.make_error('READ_BACKEND_PROPERTIES', backend=backend))
# Assign fetched value to the properties.
for key in self._propsByBackend[backend]:
if key not in attrs:
self.log.debug("attribute '%s' was not returned by load" % key)
continue
# Keep original values, they may be overwritten in the in-filters.
self.myProperties[key]['in_value'] = self.myProperties[key]['value'] = attrs[key]
self.log.debug("%s: %s" % (key, self.myProperties[key]['value']))
# Once we've loaded all properties from the backend, execute the
# in-filters.
for key in self.myProperties:
# Skip loading in-filters for None values
if self.myProperties[key]['value'] is None:
self.myProperties[key]['in_value'] = self.myProperties[key]['value'] = []
continue
# Execute defined in-filters.
if len(self.myProperties[key]['in_filter']):
self.log.debug("found %s in-filter(s) for attribute '%s'" % (str(len(self.myProperties[key]['in_filter'])), key))
# Execute each in-filter
for in_f in self.myProperties[key]['in_filter']:
self.__processFilter(in_f, key, self.myProperties)
# Convert the received type into the target type if not done already
#pylint: disable=E1101
atypes = self._objectFactory.getAttributeTypes()
for key in self.myProperties:
# Convert values from incoming backend-type to required type
if self.myProperties[key]['value']:
a_type = self.myProperties[key]['type']
be_type = self.myProperties[key]['backend_type']
# Convert all values to required type
if not atypes[a_type].is_valid_value(self.myProperties[key]['value']):
try:
self.myProperties[key]['value'] = atypes[a_type].convert_from(be_type, self.myProperties[key]['value'])
except Exception as e:
self.log.error("conversion of '%s' from '%s' to type '%s' failed: %s" % (key, be_type, a_type, str(e)))
else:
self.log.debug("converted '%s' from type '%s' to type '%s'!" % (key, be_type, a_type))
# Keep the initial value
self.myProperties[key]['last_value'] = self.myProperties[key]['orig_value'] = copy.deepcopy(self.myProperties[key]['value'])
def _delattr_(self, name):
"""
Deleter method for properties.
"""
if name in self.attributesInSaveOrder:
# Check if this attribute is blocked by another attribute and its value.
for bb in self.myProperties[name]['blocked_by']:
if bb['value'] in self.myProperties[bb['name']]['value']:
raise AttributeError(C.make_error(
'ATTRIBUTE_BLOCKED_BY', name,
source=bb['name'], value=bb['value']))
# Do not allow writing to read-only attributes.
if self.myProperties[name]['readonly']:
raise AttributeError(C.make_error('ATTRIBUTE_READ_ONLY', name))
# Do not allow removing mandatory attributes
if self.myProperties[name]['mandatory']:
raise AttributeError(C.make_error('ATTRIBUTE_MANDATORY', name))
# If not already in removed state
if len(self.myProperties[name]['value']) != 0:
self.myProperties[name]['status'] = STATUS_CHANGED
self.myProperties[name]['last_value'] = copy.deepcopy(self.myProperties[name]['value'])
self.myProperties[name]['value'] = []
else:
raise AttributeError(C.make_error('ATTRIBUTE_NOT_FOUND', name))
def _setattr_(self, name, value):
"""
This is the setter method for object attributes.
Each given attribute value is validated with the given set of
validators.
"""
# Store non property values
try:
object.__getattribute__(self, name)
self.__dict__[name] = value
return
except AttributeError:
pass
# A None value was passed to clear the attribute
if value is None:
self._delattr_(name)
return
# Try to save as property value
if name in self.myProperties:
# Check if this attribute is blocked by another attribute and its value.
for bb in self.myProperties[name]['blocked_by']:
if bb['value'] in self.myProperties[bb['name']]['value']:
raise AttributeError(C.make_error(
'ATTRIBUTE_BLOCKED_BY', name,
source=bb['name'], value=bb['value']))
# Do not allow writing to read-only attributes.
if self.myProperties[name]['readonly']:
raise AttributeError(C.make_error('ATTRIBUTE_READ_ONLY', name))
# Check if the given value has to match one out of a given list.
if len(self.myProperties[name]['values']) and value not in self.myProperties[name]['values']:
raise TypeError(C.make_error(
'ATTRIBUTE_INVALID_CONSTANT', name,
elements=", ".join(self.myProperties[name]['values'])))
# Set the new value
if self.myProperties[name]['multivalue']:
# Check if the new value is a list.
if type(value) != list:
raise TypeError(C.make_error('ATTRIBUTE_INVALID_LIST', name))
new_value = value
else:
new_value = [value]
# If necessary, fix up a value that came in as a JSON string
s_type = self.myProperties[name]['type']
try:
new_value = self._objectFactory.getAttributeTypes()[s_type].fixup(new_value)
except Exception:
raise TypeError(C.make_error('ATTRIBUTE_INVALID', name, type=s_type))
# Check if the new value is valid
#pylint: disable=E1101
if not self._objectFactory.getAttributeTypes()[s_type].is_valid_value(new_value):
raise TypeError(C.make_error('ATTRIBUTE_INVALID', name, type=s_type))
# Validate value
if self.myProperties[name]['validator']:
props_copy = copy.deepcopy(self.myProperties)
res, error = self.__processValidator(self.myProperties[name]['validator'], name, new_value, props_copy)
if not res:
if len(error):
raise ValueError(C.make_error('ATTRIBUTE_CHECK_FAILED',
name, details=error))
else:
raise ValueError(C.make_error('ATTRIBUTE_CHECK_FAILED', name))
# Ensure that unique values stay unique. Let the backend test this.
#if self.myProperties[name]['unique']:
# backendI = ObjectBackendRegistry.getBackend(self.myProperties[name]['backend'])
# if not backendI.is_uniq(name, new_value):
# raise ObjectException(C.make_error('ATTRIBUTE_NOT_UNIQUE', name, value=value))
# Assign the properties new value.
self.myProperties[name]['value'] = new_value
self.log.debug("updated property value of [%s|%s] %s:%s" % (type(self).__name__, self.uuid, name, new_value))
# Update status if there's a change
t = self.myProperties[name]['type']
current = copy.deepcopy(self.myProperties[name]['value'])
#pylint: disable=E1101
if not self._objectFactory.getAttributeTypes()[t].values_match(self.myProperties[name]['value'], self.myProperties[name]['orig_value']):
self.myProperties[name]['status'] = STATUS_CHANGED
self.myProperties[name]['last_value'] = current
else:
raise AttributeError(C.make_error('ATTRIBUTE_NOT_FOUND', name))
def _getattr_(self, name):
"""
The getter method for object attributes.
(It differentiates between object attributes and class-members)
"""
methods = getattr(self, '__methods')
# If the requested property exists in the object-attributes, then return it.
if name in self.myProperties:
# We can have single and multivalues, return the correct type here.
value = None
if self.myProperties[name]['multivalue']:
value = self.myProperties[name]['value']
else:
if len(self.myProperties[name]['value']):
value = self.myProperties[name]['value'][0]
return value
# The requested property-name seems to be a method, return the method reference.
elif name in methods:
def m_call(*args, **kwargs):
return methods[name]['ref'](self, *args, **kwargs)
return m_call
else:
raise AttributeError(C.make_error('ATTRIBUTE_NOT_FOUND', name))
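# Minimal usage sketch (not part of the original module; the class and
# attribute names are made up): the generated meta-classes route plain
# attribute access through the _getattr_/_setattr_/_delattr_ methods above,
# so client code typically looks like this:
#
#     user = SomeGeneratedUserClass(u"cn=Jane Doe,ou=people,dc=example,dc=net")
#     print user.sn          # -> single value or list, depending on 'multivalue'
#     user.sn = u"Doe"       # validated, converted and marked STATUS_CHANGED
#     del user.description   # clears the value unless mandatory or read-only
#     user.commit()          # writes the changes to the backends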
def getTemplate(self, theme="default"):
"""
Return the template data - if any. Else None.
"""
return Object.getNamedTemplate(self.env, self._templates, theme)
@staticmethod
def getNamedTemplate(env, templates, theme="default"):
"""
Return the template data - if any. Else None.
"""
ui = []
# If there's a template file, try to find it
if templates:
for template in templates:
path = None
# Absolute path
if template.startswith(os.path.sep):
path = template
# Relative path
else:
# Find path
path = pkg_resources.resource_filename('clacks.agent', os.path.join('data', 'templates', theme, template)) #@UndefinedVariable
if not os.path.exists(path):
path = os.path.join(env.config.getBaseDir(), 'templates', theme, template)
if not os.path.exists(path):
path = pkg_resources.resource_filename('clacks.agent', os.path.join('data', 'templates', "default", template)) #@UndefinedVariable
if not os.path.exists(path):
path = os.path.join(env.config.getBaseDir(), 'templates', "default", template)
if not os.path.exists(path):
return None
with open(path, "r") as f:
_ui = f.read()
# Build new merged resource element
root = etree.fromstring(_ui)
new_resources = []
resources = root.find("resources")
for include in resources.findall("include"):
rc = include.get("location")
location = os.path.join(os.path.dirname(path), rc)
if not os.path.exists(location):
raise IOError(C.make_error("NO_SUCH_RESOURCE", resource=location))
res = ""
with open(location, "r") as f:
res = f.read()
for resource in etree.fromstring(res).findall("qresource"):
files = []
prefix = resource.get("prefix")
for f in resource.findall("file"):
files.append(E.file(os.path.join(prefix, unicode(f.text))))
new_resources.append(E.resource(*files, location=rc))
root.replace(root.find("resources"), E.resources(*new_resources))
ui.append(etree.tostring(root))
return ui
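# Note (illustrative, not part of the original module): the lookup above tries,
# in order,
#   1. <clacks.agent package data>/data/templates/<theme>/<template>
#   2. <config base dir>/templates/<theme>/<template>
#   3. <clacks.agent package data>/data/templates/default/<template>
#   4. <config base dir>/templates/default/<template>
# and gives up (returns None) if none of these paths exist.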
def getAttrType(self, name):
"""
Return the type of a given object attribute.
"""
if name in self.myProperties:
return self.myProperties[name]['type']
raise AttributeError(C.make_error('ATTRIBUTE_NOT_FOUND', name))
def check(self, propsFromOtherExtensions=None):
"""
Checks whether everything is fine with the extension and its given values or not.
"""
if not propsFromOtherExtensions:
propsFromOtherExtensions = {}
# Create a copy to avoid touching the original values
props = copy.deepcopy(self.myProperties)
# Check if _mode matches with the current object type
#pylint: disable=E1101
if self._base_object and not self._mode in ['create', 'remove', 'update']:
raise ObjectException(C.make_error('OBJECT_MODE_NOT_AVAILABLE', mode=self._mode))
if not self._base_object and self._mode in ['create', 'remove']:
raise ObjectException(C.make_error('OBJECT_MODE_BASE_AVAILABLE', mode=self._mode))
# Check if we are allowed to create this base object on the given base
if self._base_object and self._mode == "create":
base_type = self.get_object_type_by_dn(self.dn)
if not base_type:
raise ObjectException(C.make_error('OBJECT_MODE_BASE_AVAILABLE', mode=self._mode))
if self.__class__.__name__ not in self._objectFactory.getAllowedSubElementsForObject(base_type):
raise ObjectException(C.make_error('OBJECT_NOT_SUB_FOR',
ext=self.__class__.__name__,
base=base_type))
# Transfer values from other commit processes into ourselves
for key in self.attributesInSaveOrder:
if props[key]['foreign'] and key in propsFromOtherExtensions:
props[key]['value'] = propsFromOtherExtensions[key]['value']
# Transfer status into commit status
props[key]['commit_status'] = props[key]['status']
# Collect values by store and process the property filters
for key in self.attributesInSaveOrder:
# Skip foreign properties
if props[key]['foreign']:
continue
# Check if this attribute is blocked by another attribute and its value.
is_blocked = False
for bb in props[key]['blocked_by']:
if bb['value'] in props[bb['name']]['value']:
is_blocked = True
break
# Check if all required attributes are set. (Skip blocked ones; they cannot be set!)
if not is_blocked and props[key]['mandatory'] and not len(props[key]['value']):
raise ObjectException(C.make_error('ATTRIBUTE_MANDATORY', key))
# Process each and every out-filter with a clean set of input values,
# to avoid that return-values overwrite themselves.
if len(props[key]['out_filter']):
self.log.debug(" found %s out-filter for %s" % (str(len(props[key]['out_filter'])), key,))
for out_f in props[key]['out_filter']:
self.__processFilter(out_f, key, props)
# Collect properties by backend
for prop_key in self.attributesInSaveOrder:
# Skip foreign properties
if props[prop_key]['foreign']:
continue
# Ensure that mandatory values are set
if props[prop_key]['mandatory'] and not len(props[prop_key]['value']):
raise ObjectException(C.make_error('ATTRIBUTE_MANDATORY', prop_key))
# Do not save untouched values
if not props[prop_key]['commit_status'] & STATUS_CHANGED:
continue
return props
def commit(self, propsFromOtherExtensions=None):
"""
Commits changes of an object to the corresponding backends.
"""
if not propsFromOtherExtensions:
propsFromOtherExtensions = {}
self.check(propsFromOtherExtensions)
self.log.debug("saving object modifications for [%s|%s]" % (type(self).__name__, self.uuid))
# Create a copy to avoid touching the original values
props = copy.deepcopy(self.myProperties)
# Transfer status into commit status
for key in self.attributesInSaveOrder:
props[key]['commit_status'] = props[key]['status']
# Transfer values from other commit processes into ourselves
if props[key]['foreign'] and key in propsFromOtherExtensions:
props[key]['value'] = propsFromOtherExtensions[key]['value']
# Adapt property states
# Run this once - If any state was adapted, then run again to ensure
# that all dependencies are processed.
first = True
_max = 5
required = False
while (first or required) and _max:
first = False
required = False
_max -= 1
for key in self.attributesInSaveOrder:
# Adapt status from dependent properties.
for propname in props[key]['depends_on']:
old = props[key]['commit_status']
props[key]['commit_status'] |= props[propname]['status'] & STATUS_CHANGED
props[key]['commit_status'] |= props[propname]['commit_status'] & STATUS_CHANGED
if props[key]['commit_status'] != old:
required = True
# Collect values by store and process the property filters
collectedAttrs = {}
for key in self.attributesInSaveOrder:
# Skip foreign properties
if props[key]['foreign']:
continue
# Do not save untouched values
if not props[key]['commit_status'] & STATUS_CHANGED:
continue
# Get the new value for the property and execute the out-filter
self.log.debug("changed: %s" % (key,))
# Process each and every out-filter with a clean set of input values,
# to avoid that return-values overwrite themselves.
if len(props[key]['out_filter']):
self.log.debug(" found %s out-filter for %s" % (str(len(props[key]['out_filter'])), key,))
for out_f in props[key]['out_filter']:
self.__processFilter(out_f, key, props)
# Collect properties by backend
for prop_key in self.attributesInSaveOrder:
# Skip foreign properties
if props[prop_key]['foreign']:
continue
# Do not save untouched values
if not props[prop_key]['commit_status'] & STATUS_CHANGED:
continue
collectedAttrs[prop_key] = props[prop_key]
# Create a backend compatible list of all changed attributes.
toStore = {}
for prop_key in collectedAttrs:
# Collect properties by backend
for be in props[prop_key]['backend']:
if not be in toStore:
toStore[be] = {}
# Convert the property's type to the required format - if it's not of the expected type.
be_type = collectedAttrs[prop_key]['backend_type']
s_type = collectedAttrs[prop_key]['type']
if not self._objectFactory.getAttributeTypes()[be_type].is_valid_value(collectedAttrs[prop_key]['value']):
collectedAttrs[prop_key]['value'] = self._objectFactory.getAttributeTypes()[s_type].convert_to(
be_type, collectedAttrs[prop_key]['value'])
# Append entry to the to-be-stored list
toStore[be][prop_key] = {'foreign': collectedAttrs[prop_key]['foreign'],
'orig': collectedAttrs[prop_key]['in_value'],
'value': collectedAttrs[prop_key]['value'],
'type': collectedAttrs[prop_key]['backend_type']}
# We may have a plugin without any attributes, like the group asterisk extension. In
# this case we have to update the object despite the lack of properties.
if not len(toStore) and self._backend:
toStore[self._backend] = {}
# Filter out the NULL backend - there is nothing to store for it
tmp = {}
for key, value in toStore.items():
# Skip NULL backend. Nothing to save, anyway.
if key == "NULL":
continue
tmp[key] = value
toStore = tmp
# Skip the whole process if there's no change at all
if not toStore:
return {}
# Update references using the toStore information
changes = {}
for be in toStore:
changes.update(toStore[be])
self.update_refs(changes)
# Handle by backend
p_backend = getattr(self, '_backend')
obj = self
zope.event.notify(ObjectChanged("pre %s" % self._mode, obj))
# Call pre-hooks now
if self._mode in ["extend", "create"]:
self.__execute_hook("PreCreate")
if self._mode in ["update"]:
self.__execute_hook("PreModify")
# First, take care about the primary backend...
if p_backend in toStore:
beAttrs = self._backendAttrs[p_backend] if p_backend in self._backendAttrs else {}
be = ObjectBackendRegistry.getBackend(p_backend)
if self._mode == "create":
obj.uuid = be.create(self.dn, toStore[p_backend], self._backendAttrs[p_backend])
elif self._mode == "extend":
be.extend(self.uuid, toStore[p_backend],
self._backendAttrs[p_backend],
self.getForeignProperties())
else:
be.update(self.uuid, toStore[p_backend], beAttrs)
# The DN may have changed
if self._base_object:
dn = be.uuid2dn(self.uuid)
# Take DN for newly created objects
if self._mode == "create":
if self._base_object:
obj.dn = dn
elif dn != obj.dn:
self.update_dn_refs(dn)
obj.dn = dn
if self._base_object:
zope.event.notify(ObjectChanged("post move", obj))
obj.orig_dn = dn
# ... then walk through the remaining ones
for backend, data in toStore.items():
# Skip primary backend - already done
if backend == p_backend:
continue
be = ObjectBackendRegistry.getBackend(backend)
beAttrs = self._backendAttrs[backend] if backend in self._backendAttrs else {}
if self._mode == "create":
be.create(self.dn, data, beAttrs)
elif self._mode == "extend":
be.extend(self.uuid, data, beAttrs, self.getForeignProperties())
else:
be.update(self.uuid, data, beAttrs)
zope.event.notify(ObjectChanged("post %s" % self._mode, obj))
# Call post-hooks now
if self._mode in ["extend", "create"]:
self.__execute_hook("PostCreate")
if self._mode in ["update"] and "PostModify":
self.__execute_hook("PostModify")
return props
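# Rough outline (illustrative, not part of the original module) of what
# commit() does for a changed object:
#
#   1. check()  - validate mandatory/blocked attributes and run the out-filters
#   2. group changed values by backend ('toStore'), skipping the NULL backend
#   3. update references, fire the "pre <mode>" event and the Pre* hooks
#   4. write the primary backend first (create/extend/update), then the others
#   5. fire the "post <mode>" event and the Post* hooks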
def revert(self):
"""
Reverts all changes made to this object since it was loaded.
"""
for key in self.myProperties:
self.myProperties[key]['value'] = self.myProperties[key]['last_value']
self.log.debug("reverted object modifications for [%s|%s]" % (type(self).__name__, self.uuid))
def getExclusiveProperties(self):
return [x for x, y in self.myProperties.items() if not y['foreign']]
def getForeignProperties(self):
return [x for x, y in self.myProperties.items() if y['foreign']]
def __processValidator(self, fltr, key, value, props_copy):
"""
This method processes a given process-list (fltr) for a given property (prop)
and returns True if the value matches the validator set, False otherwise.
"""
# This is our process-line pointer; it points to the process-list
# line we're currently executing
lptr = 0
# Our filter result stack
stack = list()
self.log.debug(" validator started (%s)" % key)
self.log.debug(" value: %s" % (value, ))
# Process the list till we reach the end..
lasterrmsg = ""
errormsgs = []
while (lptr + 1) in fltr:
# Get the current line and increase the process list pointer.
lptr += 1
curline = fltr[lptr]
# A condition matches for something and returns a boolean value.
# We'll put this value on the stack for later use.
if 'condition' in curline:
# Build up argument list
args = [props_copy, key, value] + curline['params']
# Process condition and keep results
fname = type(curline['condition']).__name__
v, errors = (curline['condition']).process(*args)
# Log what happened
self.log.debug(" %s: [Filter] %s(%s) called and returned: %s" % (
lptr, fname, ", ".join(["\"" + x + "\"" for x in curline['params']]), v))
# Append the result to the stack.
stack.append(v)
if not v:
if len(errors):
lasterrmsg = errors.pop()
# A comparator compares two values from the stack and then returns a single
# boolean value.
elif 'operator' in curline:
v1 = stack.pop()
v2 = stack.pop()
fname = type(curline['operator']).__name__
res = (curline['operator']).process(v1, v2)
stack.append(res)
# Add last error message
if not res:
errormsgs.append(lasterrmsg)
lasterrmsg = ""
# Log what happened
self.log.debug(" %s: [OPERATOR] %s(%s, %s) called and returned: %s" % (
lptr, fname, v1, v2, res))
# Attach last error message
res = stack.pop()
if not res and lasterrmsg != "":
errormsgs.append(lasterrmsg)
self.log.debug(" <- VALIDATOR ENDED (%s)" % key)
return res, errormsgs
def __processFilter(self, fltr, key, prop):
"""
This method processes a given process-list (fltr) for a given property (prop).
For example: When a property has to be stored in the backend, it will
run through the out-filter-process-list and thus will be transformed into a storable
key, value pair.
"""
# Search for replaceable patterns in the process-list.
fltr = self.__fillInPlaceholders(fltr, prop)
# This is our process-line pointer; it points to the process-list
# line we're currently executing
lptr = 0
# Our filter result stack
stack = list()
# Log values
self.log.debug(" -> FILTER STARTED (%s)" % key)
# Process the list till we reach the end..
while (lptr + 1) in fltr:
# Get the current line and increase the process list pointer.
lptr += 1
curline = fltr[lptr]
# A filter is used to manipulate the 'value' or the 'key' or maybe both.
if 'filter' in curline:
# Build up argument list
args = [self, key, prop]
fname = type(curline['filter']).__name__
for entry in curline['params']:
args.append(entry)
# Process filter and keep results
key, prop = (curline['filter']).process(*args)
# Ensure that the processed data is still valid.
# A filter may mess things up so that the next one cannot process correctly.
if key not in prop:
raise ObjectException(C.make_error('FILTER_INVALID_KEY',
key=key, filter=fname))
# Check if the filter returned all expected property values.
for pk in prop:
if not all(k in prop[pk] for k in ('backend', 'value', 'type')):
missing = ", ".join({'backend', 'value', 'type'} - set(prop[pk].keys()))
raise ObjectException(C.make_error('FILTER_MISSING_KEY', key=missing, filter=fname))
# Check if the returned value-type is list or None.
if type(prop[pk]['value']) not in [list, type(None)]:
raise ObjectException(C.make_error('FILTER_NO_LIST',
key=pk, filter=fname, type=type(prop[pk]['value'])))
self.log.debug(" %s: [Filter] %s(%s) called " % (lptr, fname,
", ".join(["\"" + x + "\"" for x in curline['params']])))
# A condition matches for something and returns a boolean value.
# We'll put this value on the stack for later use.
elif 'condition' in curline:
# Build up argument list
args = [key] + curline['params']
# Process condition and keep results
stack.append((curline['condition']).process(*args))
fname = type(curline['condition']).__name__
self.log.debug(" %s: [Condition] %s(%s) called " % (lptr, fname, ", ".join(curline['params'])))
# Handle jump, for example if a condition has failed, jump over its filter-chain.
elif 'jump' in curline:
# Jump to <line> -1 because we will increase the line ptr later.
olptr = lptr
if curline['jump'] == 'conditional':
if stack.pop():
lptr = curline['onTrue'] - 1
else:
lptr = curline['onFalse'] - 1
else:
lptr = curline['to'] - 1
self.log.debug(" %s: [Goto] %s ()" % (olptr, lptr))
# A comparator compares two values from the stack and then returns a single
# boolean value.
elif 'operator' in curline:
a = stack.pop()
b = stack.pop()
stack.append((curline['operator']).process(a, b))
fname = type(curline['operator']).__name__
self.log.debug(" %s: [Condition] %s(%s, %s) called " % (lptr, fname, a, b))
# Log current values
#self.log.debug(" result")
#for pkey in prop:
# self.log.debug(" %s: %s" % (pkey, prop[pkey]['value']))
self.log.debug(" <- FILTER ENDED")
return prop
def __fillInPlaceholders(self, fltr, props):
"""
This method fills in placeholders in in- and out-filters.
"""
# Collect all property values
propList = {}
for key in props:
if props[key]['multivalue']:
propList[key] = props[key]['value']
else:
if props[key]['value'] and len(props[key]['value']):
propList[key] = props[key]['value'][0]
else:
propList[key] = None
# An inline function which replaces format string tokens
def _placeHolder(x):
try:
x = x % propList
except KeyError:
pass
return x
# Walk through each line of the process list and replace placeholders.
for line in fltr:
if 'params' in fltr[line]:
fltr[line]['params'] = map(_placeHolder,
fltr[line]['params'])
return fltr
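# Illustrative example (not part of the original module) of the placeholder
# substitution above: process-line parameters are ordinary %-format strings
# that are filled with the current property values. With a hypothetical
# single-value property 'uid' set to ['jdoe']:
#
#     "%(uid)s@example.net"  ->  "jdoe@example.net"
#
# Unknown keys are left untouched, because the KeyError is swallowed.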
def get_object_type_by_dn(self, dn):
"""
Returns the objectType for a given DN
"""
index = PluginRegistry.getInstance("ObjectIndex")
res = index.search({'dn': dn}, {'_type': 1})
return res[0]['_type'] if res.count() == 1 else None
def get_references(self, override=None):
res = []
index = PluginRegistry.getInstance("ObjectIndex")
for ref, info in self._objectFactory.getReferences(override or self.__class__.__name__).items():
for ref_attribute, dsc in info.items():
for idsc in dsc:
if self.myProperties[idsc[1]]['orig_value'] and len(self.myProperties[idsc[1]]['orig_value']):
oval = self.myProperties[idsc[1]]['orig_value'][0]
else:
oval = None
dns = index.search({'_type': ref, ref_attribute: oval}, {'dn': 1})
if dns.count():
dns = [x['dn'] for x in dns]
res.append((
ref_attribute,
idsc[1],
getattr(self, idsc[1]),
dns or [],
self.myProperties[idsc[1]]['multivalue']))
return res
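# Note (illustrative, not part of the original module): each entry returned by
# get_references() is a tuple of
#
#     (ref_attribute, own_attribute, current_value, [dn, ...], multivalue)
#
# which update_refs()/remove_refs() below iterate in order to rewrite the
# referencing objects through ObjectProxy.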
def update_refs(self, data):
for ref_attr, self_attr, value, refs, multivalue in self.get_references(): #@UnusedVariable
for ref in refs:
# Skip to the next iteration if there's no change for the
# relevant attribute
if not self_attr in data:
continue
# Load object and change value to the new one
c_obj = ObjectProxy(ref)
c_value = getattr(c_obj, ref_attr)
o_value = data[self_attr]['orig']
if type(c_value) == list:
if type(o_value) == list:
c_value = filter(lambda x: x not in o_value, c_value)
else:
c_value = filter(lambda x: x != o_value, c_value)
if multivalue:
c_value.append(data[self_attr]['value'])
else:
c_value.append(data[self_attr]['value'][0])
setattr(c_obj, ref_attr, list(set(c_value)))
else:
setattr(c_obj, ref_attr, data[self_attr]['value'][0])
c_obj.commit()
def remove_refs(self):
for ref_attr, self_attr, value, refs, multivalue in self.get_references(): #@UnusedVariable
for ref in refs:
c_obj = ObjectProxy(ref)
c_value = getattr(c_obj, ref_attr)
if type(c_value) == list:
if type(value) == list:
c_value = filter(lambda x: x not in value, c_value)
else:
c_value = filter(lambda x: x != value, c_value)
setattr(c_obj, ref_attr, c_value)
else:
setattr(c_obj, ref_attr, None)
c_obj.commit()
def get_dn_references(self):
res = []
index = PluginRegistry.getInstance("ObjectIndex")
for info in self._objectFactory.getReferences("*", "dn").values():
for ref_attribute in info.keys():
dns = index.search({ref_attribute: self.dn}, {'dn': 1})
if dns.count():
dns = [x['dn'] for x in dns]
res.append((
ref_attribute,
map(lambda s: s.decode('utf-8'), dns if dns else [])
))
return res
def update_dn_refs(self, new_dn):
for ref_attr, refs in self.get_dn_references():
for ref in refs:
c_obj = ObjectProxy(ref)
c_value = getattr(c_obj, ref_attr)
if type(c_value) == list:
c_value = filter(lambda x: x != self.dn, c_value)
c_value.append(new_dn)
setattr(c_obj, ref_attr, list(set(c_value)))
else:
setattr(c_obj, ref_attr, new_dn)
c_obj.commit()
def remove_dn_refs(self):
for ref_attr, refs in self.get_dn_references():
for ref in refs:
c_obj = ObjectProxy(ref)
c_value = getattr(c_obj, ref_attr)
if type(c_value) == list:
c_value = filter(lambda x: x != self.dn, c_value)
setattr(c_obj, ref_attr, list(set(c_value)))
else:
setattr(c_obj, ref_attr, None)
c_obj.commit()
def remove(self):
"""
Removes this object - and possibly its containments.
"""
#pylint: disable=E1101
if not self._base_object:
raise ObjectException(C.make_error('OBJECT_REMOVE_NON_BASE_OBJECT'))
# Remove all references to ourselves
self.remove_refs()
# Collect backends
backends = [getattr(self, '_backend')]
be_attrs = {getattr(self, '_backend'): {}}
for prop, info in self.myProperties.items():
for backend in info['backend']:
if not backend in backends:
backends.append(backend)
if not backend in be_attrs:
be_attrs[backend] = {}
if self.is_attr_set(prop):
be_attrs[backend][prop] = {'foreign': info['foreign'],
'orig': info['in_value'],
'value': info['value'],
'type': info['backend_type']}
# Remove for all backends, removing the primary one as the last one
backends.reverse()
obj = self
zope.event.notify(ObjectChanged("pre remove", obj))
# Call pre-remove now
self.__execute_hook("PreRemove")
for backend in backends:
be = ObjectBackendRegistry.getBackend(backend)
r_attrs = self.getExclusiveProperties()
# Only exclusive properties are collected for removal; shared (non-exclusive) properties are kept
remove_attrs = {}
for attr in be_attrs[backend]:
if attr in r_attrs:
remove_attrs[attr] = be_attrs[backend][attr]
self.remove_refs()
self.remove_dn_refs()
#pylint: disable=E1101
be.remove(self.uuid, remove_attrs, self._backendAttrs[backend] \
if backend in self._backendAttrs else None)
zope.event.notify(ObjectChanged("post remove", obj))
# Call post-remove now
self.__execute_hook("PostRemove")
def simulate_move(self, orig_dn):
"""
Simulate a move for this object
"""
#pylint: disable=E1101
if not self._base_object:
raise ObjectException(C.make_error('OBJECT_MOVE_NON_BASE_OBJECT'))
obj = self
zope.event.notify(ObjectChanged("pre move", obj, dn=self.dn, orig_dn=orig_dn))
# Update the DN refs which have most probably changed
self.update_dn_refs(self.dn)
zope.event.notify(ObjectChanged("post move", obj, dn=self.dn, orig_dn=orig_dn))
def move(self, new_base):
"""
Moves this object - and eventually its containments.
"""
#pylint: disable=E1101
if not self._base_object:
raise ObjectException(C.make_error('OBJECT_MOVE_NON_BASE_OBJECT'))
# Collect backends
backends = [getattr(self, '_backend')]
# Collect all other backends
for info in self.myProperties.values():
for be in info['backend']:
if not be in backends:
backends.append(be)
obj = self
zope.event.notify(ObjectChanged("pre move", obj))
# Move for primary backend
be = ObjectBackendRegistry.getBackend(backends[0])
be.move(self.uuid, new_base)
# Update the DN refs which have most probably changed
p_backend = getattr(self, '_backend')
be = ObjectBackendRegistry.getBackend(p_backend)
dn = be.uuid2dn(self.uuid)
self.update_dn_refs(dn)
zope.event.notify(ObjectChanged("post move", obj, dn=dn))
def retract(self):
"""
Removes this object extension
"""
#pylint: disable=E1101
if self._base_object:
raise ObjectException(C.make_error('OBJECT_BASE_NO_RETRACT'))
# Call pre-remove now
self.__execute_hook("PreRemove")
# Remove all references to ourselves
self.remove_refs()
# Collect backends
backends = [getattr(self, '_backend')]
be_attrs = {getattr(self, '_backend'): {}}
for prop, info in self.myProperties.items():
for backend in info['backend']:
if not backend in backends:
backends.append(backend)
if not backend in be_attrs:
be_attrs[backend] = {}
if self.is_attr_set(prop):
be_attrs[backend][prop] = {'foreign': info['foreign'],
'orig': info['in_value'],
'value': info['value'],
'type': info['backend_type']}
# Retract for all backends, removing the primary one as the last one
backends.reverse()
obj = self
zope.event.notify(ObjectChanged("pre retract", obj))
for backend in backends:
be = ObjectBackendRegistry.getBackend(backend)
r_attrs = self.getExclusiveProperties()
# Only exclusive properties are collected for removal; shared (non-exclusive) properties are kept
remove_attrs = {}
for attr in be_attrs[backend]:
if attr in r_attrs:
remove_attrs[attr] = be_attrs[backend][attr]
self.remove_refs()
self.remove_dn_refs()
#pylint: disable=E1101
be.retract(self.uuid, remove_attrs, self._backendAttrs[backend] \
if backend in self._backendAttrs else None)
zope.event.notify(ObjectChanged("post retract", obj))
# Call post-remove now
self.__execute_hook("PostRemove")
def is_attr_set(self, name):
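# An attribute counts as set when it carries at least one incoming value.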
return len(self.myProperties[name]['in_value'])
def is_attr_using_default(self, name):
return not self.is_attr_set(name) and self.myProperties[name]['default']
def __execute_hook(self, hook_type):
# Execute all hooks registered for the given hook type
hooks = getattr(self, '__hooks')
if hook_type in hooks:
for hook in hooks[hook_type]:
hook["ref"](self)
class IObjectChanged(Interface):
def __init__(self, obj):
pass
class IAttributeChanged(Interface):
def __init__(self, attr, value):
pass
class ObjectChanged(object):
implements(IObjectChanged)
def __init__(self, reason, obj=None, dn=None, uuid=None, orig_dn=None, o_type=None):
self.reason = reason
self.uuid = uuid or obj.uuid
self.dn = dn or obj.dn
self.orig_dn = orig_dn or obj.orig_dn
self.o_type = o_type or obj.__class__.__name__
class AttributeChanged(object):
implements(IAttributeChanged)
def __init__(self, reason, obj, target):
self.reason = reason
self.target = target
self.uuid = obj.uuid
from clacks.agent.objects.proxy import ObjectProxy
| en | 0.7869 | # This file is part of the clacks framework. # # http://clacks-project.org # # Copyright: # (C) 2010-2012 GONICUS GmbH, Germany, http://www.gonicus.de # # License: # GPL-2: http://www.gnu.org/licenses/gpl-2.0.html # # See the LICENSE file in the project's top-level directory for details. The object base class. # Status # Register the errors handled by us This class is the base class for all objects. It contains getter and setter methods for the object attributes and it is able to initialize itself by reading data from backends. It also contains the ability to execute the in- and out-filters for the object properties. All meta-classes for objects, created by the XML defintions, will inherit this class. Returns a list containing all attributes in the correct save-order. Due to the fact that some attributes depend on another, we have to save some attributes first and then the others. Helper method for '__saveOrder' to detect the dependency depth (level) for an attribute # Instantiate Backend-Registry # Group attributes by Backend # Load dynamic dropdown-values # Initialize an empty array for each backend # Append property # Initialize object using a DN # Set status to modified for attributes that do not have a value but are # mandatory and have a default. # This ensures that default values are passed to the out_filters and get saved # afterwards. # (Defaults will be passed to in-filters too, if they are not overwritten by _read()) This method tries to initialize a object instance by reading data from the defined backend. Attributes will be grouped by their backend to ensure that only one request per backend will be performed. # Generate missing values #pylint: disable=E1101 # Get last change timestamp # Load attributes for each backend. # And then assign the values to the properties. # Create a dictionary with all attributes we want to fetch # {attribute_name: type, name: type} # Assign fetched value to the properties. # Keep original values, they may be overwritten in the in-filters. # Once we've loaded all properties from the backend, execute the # in-filters. # Skip loading in-filters for None values # Execute defined in-filters. # Execute each in-filter # Convert the received type into the target type if not done already #pylint: disable=E1101 # Convert values from incoming backend-type to required type # Convert all values to required type # Keep the initial value Deleter method for properties. # Check if this attribute is blocked by another attribute and its value. # Do not allow to write to read-only attributes. # Do not allow remove mandatory attributes # If not already in removed state This is the setter method for object attributes. Each given attribute value is validated with the given set of validators. # Store non property values # A none value was passed to clear the value # Try to save as property value # Check if this attribute is blocked by another attribute and its value. # Do not allow to write to read-only attributes. # Check if the given value has to match one out of a given list. # Set the new value # Check if the new value is s list. # Eventually fixup value from incoming JSON string # Check if the new value is valid #pylint: disable=E1101 # Validate value # Ensure that unique values stay unique. Let the backend test this. 
#if self.myProperties[name]['unique']: # backendI = ObjectBackendRegistry.getBackend(self.myProperties[name]['backend']) # if not backendI.is_uniq(name, new_value): # raise ObjectException(C.make_error('ATTRIBUTE_NOT_UNIQUE', name, value=value)) # Assign the properties new value. # Update status if there's a change #pylint: disable=E1101 The getter method object attributes. (It differentiates between object attributes and class-members) # If the requested property exists in the object-attributes, then return it. # We can have single and multivalues, return the correct type here. # The requested property-name seems to be a method, return the method reference. Return the template data - if any. Else None. Return the template data - if any. Else None. # If there's a template file, try to find it # Absolute path # Relative path # Find path #@UndefinedVariable #@UndefinedVariable # Build new merged resource element Return the type of a given object attribute. Checks whether everything is fine with the extension and its given values or not. # Create a copy to avoid touching the original values # Check if _mode matches with the current object type #pylint: disable=E1101 # Check if we are allowed to create this base object on the given base # Transfer values form other commit processes into ourselfes # Transfer status into commit status # Collect values by store and process the property filters # Skip foreign properties # Check if this attribute is blocked by another attribute and its value. # Check if all required attributes are set. (Skip blocked once, they cannot be set!) # Process each and every out-filter with a clean set of input values, # to avoid that return-values overwrite themselves. # Collect properties by backend # Skip foreign properties # Ensure that mandatory values are set # Do not save untouched values Commits changes of an object to the corresponding backends. # Create a copy to avoid touching the original values # Transfer status into commit status # Transfer values form other commit processes into ourselfes # Adapt property states # Run this once - If any state was adapted, then run again to ensure # that all dependencies are processed. # Adapt status from dependent properties. # Collect values by store and process the property filters # Skip foreign properties # Do not save untouched values # Get the new value for the property and execute the out-filter # Process each and every out-filter with a clean set of input values, # to avoid that return-values overwrite themselves. # Collect properties by backend # Skip foreign properties # Do not save untouched values # Create a backend compatible list of all changed attributes. # Collect properties by backend # Convert the properities type to the required format - if its not of the expected type. # Append entry to the to-be-stored list # We may have a plugin without any attributes, like the group asterisk extension, in # this case we've to update the object despite of the lack of properties. # Leave the show if there's nothing to do # Skip NULL backend. Nothing to save, anyway. # Skip the whole process if there's no change at all # Update references using the toStore information # Handle by backend # Call pre-hooks now # First, take care about the primary backend... # Eventually the DN has changed # Take DN for newly created objects # ... then walk thru the remaining ones # Skip primary backend - already done # Call post-hooks now Reverts all changes made to this object since it was loaded. 
This method processes a given process-list (fltr) for a given property (prop). And return TRUE if the value matches the validator set and FALSE if not. # This is our process-line pointer it points to the process-list line # we're executing at the moment # Our filter result stack # Process the list till we reach the end.. # Get the current line and increase the process list pointer. # A condition matches for something and returns a boolean value. # We'll put this value on the stack for later use. # Build up argument list # Process condition and keep results # Log what happend! # Append the result to the stack. # A comparator compares two values from the stack and then returns a single # boolean value. # Add last error message # Log what happend! # Attach last error message This method processes a given process-list (fltr) for a given property (prop). For example: When a property has to be stored in the backend, it will run through the out-filter-process-list and thus will be transformed into a storable key, value pair. # Search for replaceable patterns in the process-list. # This is our process-line pointer it points to the process-list line # we're executing at the moment # Our filter result stack # Log values # Process the list till we reach the end.. # Get the current line and increase the process list pointer. # A filter is used to manipulate the 'value' or the 'key' or maybe both. # Build up argument list # Process filter and keep results # Ensure that the processed data is still valid. # Filter may mess things up and then the next cannot process correctly. # Check if the filter returned all expected property values. # Check if the returned value-type is list or None. # A condition matches for something and returns a boolean value. # We'll put this value on the stack for later use. # Build up argument list # Process condition and keep results # Handle jump, for example if a condition has failed, jump over its filter-chain. # Jump to <line> -1 because we will increase the line ptr later. # A comparator compares two values from the stack and then returns a single # boolean value. # Log current values #self.log.debug(" result") #for pkey in prop: # self.log.debug(" %s: %s" % (pkey, prop[pkey]['value'])) This method fill in placeholder into in- and out-filters. # Collect all property values # An inline function which replaces format string tokens # Walk trough each line of the process list an replace placeholders. Returns the objectType for a given DN #@UnusedVariable # Next iterration if there's no change for the relevant # attribute # Load object and change value to the new one #@UnusedVariable Removes this object - and eventually it's containements. #pylint: disable=E1101 # Remove all references to ourselves # Collect backends # Remove for all backends, removing the primary one as the last one # Call pre-remove now # Remove all non exclusive properties #pylint: disable=E1101 # Call post-remove now Simulate a moves for this object #pylint: disable=E1101 # Update the DN refs which have most probably changed Moves this object - and eventually it's containements. 
#pylint: disable=E1101 # Collect backends # Collect all other backends # Move for primary backend # Update the DN refs which have most probably changed Removes this object extension #pylint: disable=E1101 # Call pre-remove now # Remove all references to ourselves # Collect backends # Retract for all backends, removing the primary one as the last one # Remove all non exclusive properties #pylint: disable=E1101 # Call post-remove now # Call post-remove now | 1.866216 | 2 |
tests/test_benchmark.py | fossabot/BIRL | 0 | 10485 | <gh_stars>0
"""
Testing default benchmarks in single thread and parallel configuration
Check whether it generates correct outputs and resulting values
Copyright (C) 2017-2019 <NAME> <<EMAIL>>
"""
import argparse
import logging
import os
import shutil
import sys
import unittest
try: # python 3
from unittest.mock import patch
except ImportError: # python 2
from mock import patch
import numpy as np
import pandas as pd
from numpy.testing import assert_raises, assert_array_almost_equal
sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root
from birl.utilities.data_io import update_path, save_config_yaml
from birl.utilities.dataset import args_expand_parse_images
from birl.utilities.experiments import parse_arg_params, try_decorator
from birl.benchmark import ImRegBenchmark
from birl.bm_template import BmTemplate
PATH_ROOT = os.path.dirname(update_path('birl'))
PATH_DATA = update_path('data-images')
PATH_CSV_COVER_MIX = os.path.join(PATH_DATA, 'pairs-imgs-lnds_mix.csv')
PATH_CSV_COVER_ANHIR = os.path.join(PATH_DATA, 'pairs-imgs-lnds_histol.csv')
# logging.basicConfig(level=logging.INFO)
class TestBmRegistration(unittest.TestCase):
@classmethod
def setUpClass(cls):
logging.basicConfig(level=logging.INFO)
cls.path_out = os.path.join(PATH_ROOT, 'output-testing')
shutil.rmtree(cls.path_out, ignore_errors=True)
os.mkdir(cls.path_out)
def _remove_default_experiment(self, bm_name):
path_expt = os.path.join(self.path_out, bm_name)
shutil.rmtree(path_expt, ignore_errors=True)
@classmethod
def test_benchmark_invalid_inputs(self):
# test missing some parameters
params = {
'path_table': 'x',
'path_out': 'x',
'nb_workers': 0,
'unique': False,
}
# try a missing params
for miss in ['path_table', 'path_out', 'unique']:
params_miss = params.copy()
del params_miss[miss]
assert_raises(AssertionError, ImRegBenchmark, params_miss)
# not defined output folder
assert_raises(Exception, ImRegBenchmark, params)
def test_benchmark_failing(self):
""" test run in parallel with failing experiment """
params = {
'path_table': PATH_CSV_COVER_MIX,
'path_dataset': PATH_DATA,
'path_out': self.path_out,
'preprocessing': 'nothing',
'nb_workers': 4,
'visual': True,
'unique': True,
}
benchmark = ImRegBenchmark(params)
benchmark.run()
# no landmarks were copied and no experiment results were produced
list_csv = [
len([csv for csv in files if os.path.splitext(csv)[-1] == '.csv'])
for _, _, files in os.walk(benchmark.params['path_exp'])
]
self.assertEqual(sum(list_csv), 0)
del benchmark
def test_benchmark_parallel(self):
""" test run in parallel (2 threads) """
self._remove_default_experiment(ImRegBenchmark.__name__)
params = {
'path_table': PATH_CSV_COVER_MIX,
'path_out': self.path_out,
'preprocessing': ['gray', 'matching-rgb'],
'nb_workers': 2,
'visual': True,
'unique': False,
}
benchmark = ImRegBenchmark(params)
# run it for the first time, complete experiment
benchmark.run()
# rerun the experiment to simulate repeating an unfinished benchmark
benchmark.run()
self.check_benchmark_results(benchmark, final_means=[0., 0., 0., 0., 0.], final_stds=[0., 0., 0., 0., 0.])
del benchmark
def test_benchmark_simple(self):
""" test run in sequence (1 thread) """
self._remove_default_experiment(ImRegBenchmark.__name__)
params = {
'path_table': PATH_CSV_COVER_ANHIR,
'path_dataset': PATH_DATA,
'path_out': self.path_out,
'preprocessing': ['matching-hsv', 'gray'],
'nb_workers': 1,
'visual': True,
'unique': False,
}
benchmark = ImRegBenchmark(params)
benchmark.run()
self.check_benchmark_results(benchmark, final_means=[0., 0.], final_stds=[0., 0.])
del benchmark
def test_benchmark_template(self):
""" test run in single thread """
path_config = os.path.join(self.path_out, 'sample_config.yaml')
save_config_yaml(path_config, {})
params = {
'path_table': PATH_CSV_COVER_MIX,
'path_out': self.path_out,
'path_config': path_config,
'nb_workers': 2,
'unique': False,
'visual': True,
}
benchmark = BmTemplate(params)
benchmark.run()
self.check_benchmark_results(
benchmark, final_means=[28., 68., 73., 76., 95.], final_stds=[1., 13., 28., 28., 34.]
)
os.remove(path_config)
del benchmark
def check_benchmark_results(self, benchmark, final_means, final_stds):
""" check whether the benchmark folder contains all required files
and computes statistics correctly """
bm_name = benchmark.__class__.__name__
path_bm = os.path.join(self.path_out, bm_name)
self.assertTrue(os.path.exists(path_bm), msg='Missing benchmark: %s' % bm_name)
# required output files
for file_name in [
benchmark.NAME_CSV_REGISTRATION_PAIRS, benchmark.NAME_RESULTS_CSV, benchmark.NAME_RESULTS_TXT
]:
self.assertTrue(
os.path.isfile(os.path.join(path_bm, file_name)),
msg='Missing "%s" file in the BM experiment' % file_name
)
# load registration file
path_csv = os.path.join(path_bm, benchmark.NAME_CSV_REGISTRATION_PAIRS)
df_regist = pd.read_csv(path_csv, index_col=0)
# only two items in the benchmark
self.assertEqual(
len(df_regist),
len(benchmark._df_overview),
msg='Found only %i records instead of %i' % (len(df_regist), len(benchmark._df_overview))
)
# test presence of particular columns
for col in list(benchmark.COVER_COLUMNS) + [benchmark.COL_IMAGE_MOVE_WARP]:
self.assertIn(col, df_regist.columns, msg='Missing column "%s" in result table' % col)
cols_lnds_warp = [
col in df_regist.columns for col in [benchmark.COL_POINTS_REF_WARP, benchmark.COL_POINTS_MOVE_WARP]
]
self.assertTrue(any(cols_lnds_warp), msg='Missing any column of warped landmarks')
col_lnds_warp = benchmark.COL_POINTS_REF_WARP if cols_lnds_warp[0] \
else benchmark.COL_POINTS_MOVE_WARP
# check existence of all mentioned files
for _, row in df_regist.iterrows():
self.assertTrue(
os.path.isfile(os.path.join(path_bm, row[benchmark.COL_IMAGE_MOVE_WARP])),
msg='Missing image "%s"' % row[benchmark.COL_IMAGE_MOVE_WARP]
)
self.assertTrue(
os.path.isfile(os.path.join(path_bm, row[col_lnds_warp])),
msg='Missing landmarks "%s"' % row[col_lnds_warp]
)
# check existence of statistical results
for stat_name in ['Mean', 'STD', 'Median', 'Min', 'Max']:
self.assertTrue(
any(stat_name in col for col in df_regist.columns), msg='Missing statistics "%s"' % stat_name
)
# test specific results
assert_array_almost_equal(sorted(df_regist['TRE Mean'].values), np.array(final_means), decimal=0)
assert_array_almost_equal(sorted(df_regist['TRE STD'].values), np.array(final_stds), decimal=0)
def test_try_wrap(self):
self.assertIsNone(try_wrap())
def test_argparse(self):
with patch('argparse._sys.argv', ['script.py']):
args = parse_arg_params(argparse.ArgumentParser())
self.assertIsInstance(args, dict)
def test_argparse_images(self):
with patch('argparse._sys.argv', ['script.py', '-i', 'an_image.png']):
args = args_expand_parse_images(argparse.ArgumentParser())
self.assertIsInstance(args, dict)
def test_fail_visual(self):
fig = ImRegBenchmark._visual_image_move_warp_lnds_move_warp({ImRegBenchmark.COL_POINTS_MOVE_WARP: 'abc'})
self.assertIsNone(fig)
fig = ImRegBenchmark._visual_image_move_warp_lnds_ref_warp({ImRegBenchmark.COL_POINTS_REF_WARP: 'abc'})
self.assertIsNone(fig)
fig = ImRegBenchmark.visualise_registration((0, {}))
self.assertIsNone(fig)
@try_decorator
def try_wrap():
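# '%i' % '42' raises a TypeError, so the decorator is expected to swallow it and return None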
return '%i' % '42'
| """
Testing default benchmarks in single thread and parallel configuration
Check whether it generates correct outputs and resulting values
Copyright (C) 2017-2019 <NAME> <<EMAIL>>
"""
import argparse
import logging
import os
import shutil
import sys
import unittest
try: # python 3
from unittest.mock import patch
except ImportError: # python 2
from mock import patch
import numpy as np
import pandas as pd
from numpy.testing import assert_raises, assert_array_almost_equal
sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root
from birl.utilities.data_io import update_path, save_config_yaml
from birl.utilities.dataset import args_expand_parse_images
from birl.utilities.experiments import parse_arg_params, try_decorator
from birl.benchmark import ImRegBenchmark
from birl.bm_template import BmTemplate
PATH_ROOT = os.path.dirname(update_path('birl'))
PATH_DATA = update_path('data-images')
PATH_CSV_COVER_MIX = os.path.join(PATH_DATA, 'pairs-imgs-lnds_mix.csv')
PATH_CSV_COVER_ANHIR = os.path.join(PATH_DATA, 'pairs-imgs-lnds_histol.csv')
# logging.basicConfig(level=logging.INFO)
class TestBmRegistration(unittest.TestCase):
@classmethod
def setUpClass(cls):
logging.basicConfig(level=logging.INFO)
cls.path_out = os.path.join(PATH_ROOT, 'output-testing')
shutil.rmtree(cls.path_out, ignore_errors=True)
os.mkdir(cls.path_out)
def _remove_default_experiment(self, bm_name):
path_expt = os.path.join(self.path_out, bm_name)
shutil.rmtree(path_expt, ignore_errors=True)
@classmethod
def test_benchmark_invalid_inputs(self):
# test missing some parameters
params = {
'path_table': 'x',
'path_out': 'x',
'nb_workers': 0,
'unique': False,
}
# try a missing params
for miss in ['path_table', 'path_out', 'unique']:
params_miss = params.copy()
del params_miss[miss]
assert_raises(AssertionError, ImRegBenchmark, params_miss)
# not defined output folder
assert_raises(Exception, ImRegBenchmark, params)
def test_benchmark_failing(self):
""" test run in parallel with failing experiment """
params = {
'path_table': PATH_CSV_COVER_MIX,
'path_dataset': PATH_DATA,
'path_out': self.path_out,
'preprocessing': 'nothing',
'nb_workers': 4,
'visual': True,
'unique': True,
}
benchmark = ImRegBenchmark(params)
benchmark.run()
# no landmarks were copied and no experiment results were produced
list_csv = [
len([csv for csv in files if os.path.splitext(csv)[-1] == '.csv'])
for _, _, files in os.walk(benchmark.params['path_exp'])
]
self.assertEqual(sum(list_csv), 0)
del benchmark
def test_benchmark_parallel(self):
""" test run in parallel (2 threads) """
self._remove_default_experiment(ImRegBenchmark.__name__)
params = {
'path_table': PATH_CSV_COVER_MIX,
'path_out': self.path_out,
'preprocessing': ['gray', 'matching-rgb'],
'nb_workers': 2,
'visual': True,
'unique': False,
}
benchmark = ImRegBenchmark(params)
# run it for the first time, complete experiment
benchmark.run()
# rerun the experiment to simulate repeating an unfinished benchmark
benchmark.run()
self.check_benchmark_results(benchmark, final_means=[0., 0., 0., 0., 0.], final_stds=[0., 0., 0., 0., 0.])
del benchmark
def test_benchmark_simple(self):
""" test run in sequence (1 thread) """
self._remove_default_experiment(ImRegBenchmark.__name__)
params = {
'path_table': PATH_CSV_COVER_ANHIR,
'path_dataset': PATH_DATA,
'path_out': self.path_out,
'preprocessing': ['matching-hsv', 'gray'],
'nb_workers': 1,
'visual': True,
'unique': False,
}
benchmark = ImRegBenchmark(params)
benchmark.run()
self.check_benchmark_results(benchmark, final_means=[0., 0.], final_stds=[0., 0.])
del benchmark
def test_benchmark_template(self):
""" test run in single thread """
path_config = os.path.join(self.path_out, 'sample_config.yaml')
save_config_yaml(path_config, {})
params = {
'path_table': PATH_CSV_COVER_MIX,
'path_out': self.path_out,
'path_config': path_config,
'nb_workers': 2,
'unique': False,
'visual': True,
}
benchmark = BmTemplate(params)
benchmark.run()
self.check_benchmark_results(
benchmark, final_means=[28., 68., 73., 76., 95.], final_stds=[1., 13., 28., 28., 34.]
)
os.remove(path_config)
del benchmark
def check_benchmark_results(self, benchmark, final_means, final_stds):
""" check whether the benchmark folder contains all required files
and computes statistics correctly """
bm_name = benchmark.__class__.__name__
path_bm = os.path.join(self.path_out, bm_name)
self.assertTrue(os.path.exists(path_bm), msg='Missing benchmark: %s' % bm_name)
# required output files
for file_name in [
benchmark.NAME_CSV_REGISTRATION_PAIRS, benchmark.NAME_RESULTS_CSV, benchmark.NAME_RESULTS_TXT
]:
self.assertTrue(
os.path.isfile(os.path.join(path_bm, file_name)),
msg='Missing "%s" file in the BM experiment' % file_name
)
# load registration file
path_csv = os.path.join(path_bm, benchmark.NAME_CSV_REGISTRATION_PAIRS)
df_regist = pd.read_csv(path_csv, index_col=0)
# only two items in the benchmark
self.assertEqual(
len(df_regist),
len(benchmark._df_overview),
msg='Found only %i records instead of %i' % (len(df_regist), len(benchmark._df_overview))
)
# test presence of particular columns
for col in list(benchmark.COVER_COLUMNS) + [benchmark.COL_IMAGE_MOVE_WARP]:
self.assertIn(col, df_regist.columns, msg='Missing column "%s" in result table' % col)
cols_lnds_warp = [
col in df_regist.columns for col in [benchmark.COL_POINTS_REF_WARP, benchmark.COL_POINTS_MOVE_WARP]
]
self.assertTrue(any(cols_lnds_warp), msg='Missing any column of warped landmarks')
col_lnds_warp = benchmark.COL_POINTS_REF_WARP if cols_lnds_warp[0] \
else benchmark.COL_POINTS_MOVE_WARP
# check existence of all mentioned files
for _, row in df_regist.iterrows():
self.assertTrue(
os.path.isfile(os.path.join(path_bm, row[benchmark.COL_IMAGE_MOVE_WARP])),
msg='Missing image "%s"' % row[benchmark.COL_IMAGE_MOVE_WARP]
)
self.assertTrue(
os.path.isfile(os.path.join(path_bm, row[col_lnds_warp])),
msg='Missing landmarks "%s"' % row[col_lnds_warp]
)
# check existence of statistical results
for stat_name in ['Mean', 'STD', 'Median', 'Min', 'Max']:
self.assertTrue(
any(stat_name in col for col in df_regist.columns), msg='Missing statistics "%s"' % stat_name
)
# test specific results
assert_array_almost_equal(sorted(df_regist['TRE Mean'].values), np.array(final_means), decimal=0)
assert_array_almost_equal(sorted(df_regist['TRE STD'].values), np.array(final_stds), decimal=0)
def test_try_wrap(self):
self.assertIsNone(try_wrap())
def test_argparse(self):
with patch('argparse._sys.argv', ['script.py']):
args = parse_arg_params(argparse.ArgumentParser())
self.assertIsInstance(args, dict)
def test_argparse_images(self):
with patch('argparse._sys.argv', ['script.py', '-i', 'an_image.png']):
args = args_expand_parse_images(argparse.ArgumentParser())
self.assertIsInstance(args, dict)
def test_fail_visual(self):
fig = ImRegBenchmark._visual_image_move_warp_lnds_move_warp({ImRegBenchmark.COL_POINTS_MOVE_WARP: 'abc'})
self.assertIsNone(fig)
fig = ImRegBenchmark._visual_image_move_warp_lnds_ref_warp({ImRegBenchmark.COL_POINTS_REF_WARP: 'abc'})
self.assertIsNone(fig)
fig = ImRegBenchmark.visualise_registration((0, {}))
self.assertIsNone(fig)
@try_decorator
def try_wrap():
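# '%i' % '42' raises a TypeError, so the decorator is expected to swallow it and return None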
return '%i' % '42' | en | 0.754166 | Testing default benchmarks in single thred and parallel configuration Check whether it generates correct outputs and resulting values Copyright (C) 2017-2019 <NAME> <<EMAIL>> # python 3 # python 2 # Add path to root # logging.basicConfig(level=logging.INFO) # test missing some parameters # try a missing params # not defined output folder test run in parallel with failing experiment # no landmarks was copy and also no experiment results was produced test run in parallel (2 threads) # run it for the first time, complete experiment # rerun experiment simulated repeating unfinished benchmarks test run in sequence (1 thread) test run in single thread check whether the benchmark folder contains all required files and compute statistic correctly # required output files # load registration file # only two items in the benchmark # test presence of particular columns # check existence of all mentioned files # check existence of statistical results # test specific results | 2.148984 | 2 |
python/UdemyCourse/2022_Python_Bootcamp/basics/errors_exception_handling/__init__.py | pradyotprksh/development_learning | 9 | 10486 | from .errors_exception_handling import errors_exception_handling
| from .errors_exception_handling import errors_exception_handling
| none | 1 | 1.097259 | 1 |
|
mtstub.py | shimniok/rockblock | 1 | 10487 | <reponame>shimniok/rockblock<gh_stars>1-10
#!/usr/bin/env python
##################################################################################################
## mtstub.py
##
## emulates rockblock api so I don't have to burn credits testing...
##################################################################################################
import cgi
#import cgitb; cgitb.enable() # for troubleshooting
import config
print "Content-type: plain/text"
print
form = cgi.FieldStorage()
print "OK,12345"
| #!/usr/bin/env python
##################################################################################################
## mtstub.py
##
## emulates rockblock api so I don't have to burn credits testing...
##################################################################################################
import cgi
#import cgitb; cgitb.enable() # for troubleshooting
import config
print "Content-type: plain/text"
print
form = cgi.FieldStorage()
print "OK,12345" | de | 0.634188 | #!/usr/bin/env python ################################################################################################## ## mtstub.py ## ## emulates rockblock api so I don't have to burn credits testing... ################################################################################################## #import cgitb; cgitb.enable() # for troubleshooting | 1.870813 | 2 |
sum.py | PraghadeshManivannan/Built-in-Functions-Python | 0 | 10488 | <filename>sum.py
#sum(iterable, start=0, /)
#Return the sum of a 'start' value (default: 0) plus an iterable of numbers
#When the iterable is empty, return the start value.
'''This function is intended specifically for use with numeric values and may
reject non-numeric types.'''
a = [1,3,5,7,9,4,6,2,8]
print(sum(a))
print(sum(a,start = 4))
| <filename>sum.py
#sum(iterable, start=0, /)
#Return the sum of a 'start' value (default: 0) plus an iterable of numbers
#When the iterable is empty, return the start value.
'''This function is intended specifically for use with numeric values and may
reject non-numeric types.'''
a = [1,3,5,7,9,4,6,2,8]
print(sum(a))
print(sum(a,start = 4))
| en | 0.487904 | #sum(iterable, start=0, /) #Return the sum of a 'start' value (default: 0) plus an iterable of numbers #When the iterable is empty, return the start value. This function is intended specifically for use with numeric values and may
reject non-numeric types. | 3.992311 | 4 |
idaes/apps/matopt/materials/lattices/diamond_lattice.py | carldlaird/idaes-pse | 112 | 10489 | #################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
from copy import deepcopy
from math import sqrt
import numpy as np
from .unit_cell_lattice import UnitCell, UnitCellLattice
from ..geometry import Cube
from ..tiling import CubicTiling
from ..transform_func import ScaleFunc, RotateFunc
from ...util.util import ListHasPoint
class DiamondLattice(UnitCellLattice):
RefIAD = sqrt(3) / 4
# === STANDARD CONSTRUCTOR
def __init__(self, IAD):
RefUnitCellShape = Cube(1, BotBackLeftCorner=np.array([0, 0, 0], dtype=float))
RefUnitCellTiling = CubicTiling(RefUnitCellShape)
RefFracPositions = [np.array([0.0, 0.0, 0.0]),
np.array([0.5, 0.5, 0.0]),
np.array([0.0, 0.5, 0.5]),
np.array([0.5, 0.0, 0.5]),
np.array([0.25, 0.25, 0.25]),
np.array([0.25, 0.75, 0.75]),
np.array([0.75, 0.25, 0.75]),
np.array([0.75, 0.75, 0.25])]
RefUnitCell = UnitCell(RefUnitCellTiling, RefFracPositions)
UnitCellLattice.__init__(self, RefUnitCell)
self._IAD = DiamondLattice.RefIAD # IAD is set correctly after calling applyTransF
self.applyTransF(ScaleFunc(IAD / DiamondLattice.RefIAD))
self._NthNeighbors = [[[np.array([0.25, 0.25, 0.25]),
np.array([-0.25, -0.25, 0.25]),
np.array([-0.25, 0.25, -0.25]),
np.array([0.25, -0.25, -0.25])],
[np.array([-0.25, -0.25, -0.25]),
np.array([0.25, 0.25, -0.25]),
np.array([0.25, -0.25, 0.25]),
np.array([-0.25, 0.25, 0.25])]],
[[np.array([0.0, 0.5, 0.5]),
np.array([0.0, 0.5, -0.5]),
np.array([0.0, -0.5, 0.5]),
np.array([0.0, -0.5, -0.5]),
np.array([0.5, 0.5, 0.0]),
np.array([0.5, 0.0, 0.5]),
np.array([0.5, -0.5, 0.0]),
np.array([0.5, 0.0, -0.5]),
np.array([-0.5, 0.5, 0.0]),
np.array([-0.5, 0.0, 0.5]),
np.array([-0.5, -0.5, 0.0]),
np.array([-0.5, 0.0, -0.5])],
[np.array([0.0, 0.5, 0.5]),
np.array([0.0, 0.5, -0.5]),
np.array([0.0, -0.5, 0.5]),
np.array([0.0, -0.5, -0.5]),
np.array([0.5, 0.5, 0.0]),
np.array([0.5, 0.0, 0.5]),
np.array([0.5, -0.5, 0.0]),
np.array([0.5, 0.0, -0.5]),
np.array([-0.5, 0.5, 0.0]),
np.array([-0.5, 0.0, 0.5]),
np.array([-0.5, -0.5, 0.0]),
np.array([-0.5, 0.0, -0.5])]]]
self._typeDict = {0: 0, 3: 1}
self._relativePositions = {0: np.array([0.0, 0.0, 0.0]), 3: np.array([0.25, 0.25, 0.25])}
# === CONSTRUCTOR - Aligned with {100}
@classmethod
def alignedWith100(cls, IAD):
return cls(IAD) # Default implementation
# === CONSTRUCTOR - Aligned with {110}
@classmethod
def aligndWith110(cls, IAD):
result = cls(IAD)
thetaX = 0
thetaY = np.pi * 0.25
thetaZ = 0
result.applyTransF(RotateFunc.fromXYZAngles(thetaX, thetaY, thetaZ))
return result
# === CONSTRUCTOR - Aligned with {111}
@classmethod
def alignedWith111(cls, IAD, blnTrianglesAlignedWithX=True):
result = cls(IAD)
thetaX = -np.pi * 0.25
thetaY = -np.arctan2(-sqrt(2), 2)
thetaZ = (np.pi * 0.5 if blnTrianglesAlignedWithX else 0)
result.applyTransF(RotateFunc.fromXYZAngles(thetaX, thetaY, thetaZ))
return result
# === CONSTRUCTOR - Aligned with {xyz}
@classmethod
def alignedWith(cls, IAD, MI):
if (type(MI) is str) and (len(MI) == 3) and all(x.isdigit() for x in MI):
if MI in ['100', '010', '001']:
return cls(IAD)
elif MI in ['110', '101', '011']:
return cls.aligndWith110(IAD)
elif MI == '111':
return cls.alignedWith111(IAD)
else:
result = cls(IAD)
a = np.array([0.0, 0.0, 1.0])
b = np.array([float(MI[0]), float(MI[1]), float(MI[2])])
axis = np.cross(a, b)
angle = np.arccos(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))
result.applyTransF(RotateFunc.fromAxisAngle(axis, angle))
return result
return ValueError('DiamondLattice.alignedWith: Input direction is not correct.')
# === MANIPULATION METHODS
def applyTransF(self, TransF):
if isinstance(TransF, ScaleFunc):
if TransF.isIsometric:
self._IAD *= TransF.Scale[0]
else:
raise ValueError('DiamondLattice.applyTransF: Can only scale isometrically')
UnitCellLattice.applyTransF(self, TransF)
# === AUXILIARY METHODS
def _getPointType(self, P):
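# Classify a reference-frame point by the sum of its quarter-cell coordinates modulo 4; types 0 and 3 are the two diamond sublattices (A and B sites).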
return (int(round(P[0] * 4)) + int(round(P[1] * 4)) + int(round(P[2] * 4))) % 4
# === PROPERTY EVALUATION METHODS
# NOTE: inherited from UnitCellLattice
# def isOnLattice(self,P):
def areNeighbors(self, P1, P2):
return np.linalg.norm(P2 - P1) <= self.IAD
def getNeighbors(self, P, layer=1):
RefP = self._getConvertToReference(P)
PType = self._getPointType(RefP)
if PType not in self._typeDict.keys():
raise ValueError('DiamondLattice.getNeighbors Should never reach here!')
if layer > len(self._NthNeighbors):
self._calculateNeighbors(layer)
NBs = deepcopy(self._NthNeighbors[layer - 1][self._typeDict[PType]])
for NeighP in NBs:
NeighP += RefP
self._convertFromReference(NeighP)
return NBs
def _calculateNeighbors(self, layer):
NList = []
for k, v in self._typeDict.items():
tmp = [np.array([0, 0, 0], dtype=float)]
for nb in self._NthNeighbors:
tmp.extend(nb[v])
NList.append(tmp)
for _ in range(layer - len(self._NthNeighbors)):
tmp = [[] for _ in self._typeDict.keys()]
for k, v in self._typeDict.items():
for P in self._NthNeighbors[len(self._NthNeighbors) - 1][v]:
PType = self._getPointType(P + self._relativePositions[k])
for Q in self._NthNeighbors[0][self._typeDict[PType]]:
N = P + Q
if not ListHasPoint(NList[v], N, 0.001 * DiamondLattice.RefIAD):
tmp[v].append(N)
NList[v].append(N)
self._NthNeighbors.append(tmp)
def isASite(self, P):
RefP = self._getConvertToReference(P)
PType = self._getPointType(RefP)
return PType == 0
def isBSite(self, P):
RefP = self._getConvertToReference(P)
PType = self._getPointType(RefP)
return PType == 3
def setDesign(self, D, AType, BType):
for i, P in enumerate(D.Canvas.Points):
if self.isASite(P):
D.setContent(i, AType)
elif self.isBSite(P):
D.setContent(i, BType)
else:
raise ValueError('setDesign can not set site not on lattice')
# === BASIC QUERY METHODS
@property
def IAD(self):
return self._IAD
@property
def Diamond100LayerSpacing(self):
return self.IAD / sqrt(3)
@property
def Diamond110LayerSpacing(self):
return self.IAD * sqrt(2) / sqrt(3)
@property
def Diamond111LayerSpacing(self):
return self.IAD * 4 / 3
@property
def Diamond112LayerSpacing(self):
return self.IAD * sqrt(2) / 3
def getLayerSpacing(self, MI):
if (type(MI) is str) and (len(MI) == 3) and all(x.isdigit() for x in MI):
if MI in ['100', '010', '001']:
return self.Diamond100LayerSpacing
elif MI in ['110', '101', '011']:
return self.Diamond110LayerSpacing
elif MI == '111':
return self.Diamond111LayerSpacing
elif MI in ['112', '121', '211']:
return self.Diamond112LayerSpacing
else:
raise NotImplementedError('DiamondLattice.getLayerSpacing: Input direction is not supported.')
return ValueError('DiamondLattice.getLayerSpacing: Input direction is not correct.')
def getShellSpacing(self, MI):
if (type(MI) is str) and (len(MI) == 3) and all(x.isdigit() for x in MI):
if MI in ['100', '010', '001', '110', '101', '011', '111']:
return self.IAD * sqrt(8) / sqrt(3)
elif MI in ['112', '121', '211']:
return self.IAD * sqrt(2) / sqrt(3)
else:
raise NotImplementedError('DiamondLattice.getShellSpacing: Input direction is not supported.')
return ValueError('The input direction is not correct.')
def getUniqueLayerCount(self, MI):
if (type(MI) is str) and (len(MI) == 3) and all(x.isdigit() for x in MI):
if MI in ['100', '010', '001']:
return 4
elif MI in ['110', '101', '011']:
return 2
elif MI == '111':
return 3
elif MI in ['112', '121', '211']:
return 6
else:
raise NotImplementedError('DiamondLattice.getUniqueLayerCount: Input direction is not supported.')
return ValueError('The input direction is not correct.')
| #################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
from copy import deepcopy
from math import sqrt
import numpy as np
from .unit_cell_lattice import UnitCell, UnitCellLattice
from ..geometry import Cube
from ..tiling import CubicTiling
from ..transform_func import ScaleFunc, RotateFunc
from ...util.util import ListHasPoint
class DiamondLattice(UnitCellLattice):
RefIAD = sqrt(3) / 4
# === STANDARD CONSTRUCTOR
def __init__(self, IAD):
RefUnitCellShape = Cube(1, BotBackLeftCorner=np.array([0, 0, 0], dtype=float))
RefUnitCellTiling = CubicTiling(RefUnitCellShape)
RefFracPositions = [np.array([0.0, 0.0, 0.0]),
np.array([0.5, 0.5, 0.0]),
np.array([0.0, 0.5, 0.5]),
np.array([0.5, 0.0, 0.5]),
np.array([0.25, 0.25, 0.25]),
np.array([0.25, 0.75, 0.75]),
np.array([0.75, 0.25, 0.75]),
np.array([0.75, 0.75, 0.25])]
RefUnitCell = UnitCell(RefUnitCellTiling, RefFracPositions)
UnitCellLattice.__init__(self, RefUnitCell)
self._IAD = DiamondLattice.RefIAD # IAD is set correctly after calling applyTransF
self.applyTransF(ScaleFunc(IAD / DiamondLattice.RefIAD))
self._NthNeighbors = [[[np.array([0.25, 0.25, 0.25]),
np.array([-0.25, -0.25, 0.25]),
np.array([-0.25, 0.25, -0.25]),
np.array([0.25, -0.25, -0.25])],
[np.array([-0.25, -0.25, -0.25]),
np.array([0.25, 0.25, -0.25]),
np.array([0.25, -0.25, 0.25]),
np.array([-0.25, 0.25, 0.25])]],
[[np.array([0.0, 0.5, 0.5]),
np.array([0.0, 0.5, -0.5]),
np.array([0.0, -0.5, 0.5]),
np.array([0.0, -0.5, -0.5]),
np.array([0.5, 0.5, 0.0]),
np.array([0.5, 0.0, 0.5]),
np.array([0.5, -0.5, 0.0]),
np.array([0.5, 0.0, -0.5]),
np.array([-0.5, 0.5, 0.0]),
np.array([-0.5, 0.0, 0.5]),
np.array([-0.5, -0.5, 0.0]),
np.array([-0.5, 0.0, -0.5])],
[np.array([0.0, 0.5, 0.5]),
np.array([0.0, 0.5, -0.5]),
np.array([0.0, -0.5, 0.5]),
np.array([0.0, -0.5, -0.5]),
np.array([0.5, 0.5, 0.0]),
np.array([0.5, 0.0, 0.5]),
np.array([0.5, -0.5, 0.0]),
np.array([0.5, 0.0, -0.5]),
np.array([-0.5, 0.5, 0.0]),
np.array([-0.5, 0.0, 0.5]),
np.array([-0.5, -0.5, 0.0]),
np.array([-0.5, 0.0, -0.5])]]]
self._typeDict = {0: 0, 3: 1}
self._relativePositions = {0: np.array([0.0, 0.0, 0.0]), 3: np.array([0.25, 0.25, 0.25])}
# === CONSTRUCTOR - Aligned with {100}
@classmethod
def alignedWith100(cls, IAD):
return cls(IAD) # Default implementation
# === CONSTRUCTOR - Aligned with {110}
@classmethod
def aligndWith110(cls, IAD):
result = cls(IAD)
thetaX = 0
thetaY = np.pi * 0.25
thetaZ = 0
result.applyTransF(RotateFunc.fromXYZAngles(thetaX, thetaY, thetaZ))
return result
# === CONSTRUCTOR - Aligned with {111}
@classmethod
def alignedWith111(cls, IAD, blnTrianglesAlignedWithX=True):
result = cls(IAD)
thetaX = -np.pi * 0.25
thetaY = -np.arctan2(-sqrt(2), 2)
thetaZ = (np.pi * 0.5 if blnTrianglesAlignedWithX else 0)
result.applyTransF(RotateFunc.fromXYZAngles(thetaX, thetaY, thetaZ))
return result
# === CONSTRUCTOR - Aligned with {xyz}
@classmethod
def alignedWith(cls, IAD, MI):
if (type(MI) is str) and (len(MI) == 3) and all(x.isdigit() for x in MI):
if MI in ['100', '010', '001']:
return cls(IAD)
elif MI in ['110', '101', '011']:
return cls.aligndWith110(IAD)
elif MI == '111':
return cls.alignedWith111(IAD)
else:
result = cls(IAD)
a = np.array([0.0, 0.0, 1.0])
b = np.array([float(MI[0]), float(MI[1]), float(MI[2])])
axis = np.cross(a, b)
angle = np.arccos(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))
result.applyTransF(RotateFunc.fromAxisAngle(axis, angle))
return result
return ValueError('DiamondLattice.alignedWith: Input direction is not correct.')
# === MANIPULATION METHODS
def applyTransF(self, TransF):
if isinstance(TransF, ScaleFunc):
if TransF.isIsometric:
self._IAD *= TransF.Scale[0]
else:
raise ValueError('DiamondLattice.applyTransF: Can only scale isometrically')
UnitCellLattice.applyTransF(self, TransF)
# === AUXILIARY METHODS
def _getPointType(self, P):
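# Classify a reference-frame point by the sum of its quarter-cell coordinates modulo 4; types 0 and 3 are the two diamond sublattices (A and B sites).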
return (int(round(P[0] * 4)) + int(round(P[1] * 4)) + int(round(P[2] * 4))) % 4
# === PROPERTY EVALUATION METHODS
# NOTE: inherited from UnitCellLattice
# def isOnLattice(self,P):
def areNeighbors(self, P1, P2):
return np.linalg.norm(P2 - P1) <= self.IAD
def getNeighbors(self, P, layer=1):
RefP = self._getConvertToReference(P)
PType = self._getPointType(RefP)
if PType not in self._typeDict.keys():
raise ValueError('DiamondLattice.getNeighbors Should never reach here!')
if layer > len(self._NthNeighbors):
self._calculateNeighbors(layer)
NBs = deepcopy(self._NthNeighbors[layer - 1][self._typeDict[PType]])
for NeighP in NBs:
NeighP += RefP
self._convertFromReference(NeighP)
return NBs
def _calculateNeighbors(self, layer):
NList = []
for k, v in self._typeDict.items():
tmp = [np.array([0, 0, 0], dtype=float)]
for nb in self._NthNeighbors:
tmp.extend(nb[v])
NList.append(tmp)
for _ in range(layer - len(self._NthNeighbors)):
tmp = [[] for _ in self._typeDict.keys()]
for k, v in self._typeDict.items():
for P in self._NthNeighbors[len(self._NthNeighbors) - 1][v]:
PType = self._getPointType(P + self._relativePositions[k])
for Q in self._NthNeighbors[0][self._typeDict[PType]]:
N = P + Q
if not ListHasPoint(NList[v], N, 0.001 * DiamondLattice.RefIAD):
tmp[v].append(N)
NList[v].append(N)
self._NthNeighbors.append(tmp)
def isASite(self, P):
RefP = self._getConvertToReference(P)
PType = self._getPointType(RefP)
return PType == 0
def isBSite(self, P):
RefP = self._getConvertToReference(P)
PType = self._getPointType(RefP)
return PType == 3
def setDesign(self, D, AType, BType):
for i, P in enumerate(D.Canvas.Points):
if self.isASite(P):
D.setContent(i, AType)
elif self.isBSite(P):
D.setContent(i, BType)
else:
raise ValueError('setDesign can not set site not on lattice')
# === BASIC QUERY METHODS
@property
def IAD(self):
return self._IAD
@property
def Diamond100LayerSpacing(self):
return self.IAD / sqrt(3)
@property
def Diamond110LayerSpacing(self):
return self.IAD * sqrt(2) / sqrt(3)
@property
def Diamond111LayerSpacing(self):
return self.IAD * 4 / 3
@property
def Diamond112LayerSpacing(self):
return self.IAD * sqrt(2) / 3
def getLayerSpacing(self, MI):
if (type(MI) is str) and (len(MI) == 3) and all(x.isdigit() for x in MI):
if MI in ['100', '010', '001']:
return self.Diamond100LayerSpacing
elif MI in ['110', '101', '011']:
return self.Diamond110LayerSpacing
elif MI == '111':
return self.Diamond111LayerSpacing
elif MI in ['112', '121', '211']:
return self.Diamond112LayerSpacing
else:
raise NotImplementedError('DiamondLattice.getLayerSpacing: Input direction is not supported.')
return ValueError('DiamondLattice.getLayerSpacing: Input direction is not correct.')
def getShellSpacing(self, MI):
if (type(MI) is str) and (len(MI) == 3) and all(x.isdigit() for x in MI):
if MI in ['100', '010', '001', '110', '101', '011', '111']:
return self.IAD * sqrt(8) / sqrt(3)
elif MI in ['112', '121', '211']:
return self.IAD * sqrt(2) / sqrt(3)
else:
raise NotImplementedError('DiamondLattice.getShellSpacing: Input direction is not supported.')
return ValueError('The input direction is not correct.')
def getUniqueLayerCount(self, MI):
if (type(MI) is str) and (len(MI) == 3) and all(x.isdigit() for x in MI):
if MI in ['100', '010', '001']:
return 4
elif MI in ['110', '101', '011']:
return 2
elif MI == '111':
return 3
elif MI in ['112', '121', '211']:
return 6
else:
raise NotImplementedError('DiamondLattice.getUniqueLayerCount: Input direction is not supported.')
return ValueError('The input direction is not correct.')
| en | 0.689004 | ################################################################################# # The Institute for the Design of Advanced Energy Systems Integrated Platform # Framework (IDAES IP) was produced under the DOE Institute for the # Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021 # by the software owners: The Regents of the University of California, through # Lawrence Berkeley National Laboratory, National Technology & Engineering # Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University # Research Corporation, et al. All rights reserved. # # Please see the files COPYRIGHT.md and LICENSE.md for full copyright and # license information. ################################################################################# # === STANDARD CONSTRUCTOR # IAD is set correctly after calling applyTransF # === CONSTRUCTOR - Aligned with {100} # Default implementation # === CONSTRUCTOR - Aligned with {110} # === CONSTRUCTOR - Aligned with {111} # === CONSTRUCTOR - Aligned with {xyz} # === MANIPULATION METHODS # === AUXILIARY METHODS # === PROPERTY EVALUATION METHODS # NOTE: inherited from UnitCellLattice # def isOnLattice(self,P): # === BASIC QUERY METHODS | 2.023895 | 2 |
elateridae_baits.py | AAFC-BICoE/elateridae-ortholog-baitset | 0 | 10490 | <filename>elateridae_baits.py
# coding: utf8
"""
Ortholog Based Bait Design Script for creating Elateridae ortholog based baits suitable for submission to myBaits
Compares t_coffee AA alignment scores with nucleotide tranalignments to find conserved blocks
Author <NAME> <EMAIL>
License: MIT
Copyright: Government of Canada
"""
import glob
import os
from Bio import AlignIO, SeqIO
import time
import argparse
import random
def main():
"""
Main function to run the Elateridae bait designer
:return:
"""
parser = argparse.ArgumentParser(description='Processes T_Coffee AA alignments to generate an ortholog bait set')
parser.add_argument('-o', type=str, required=True,
help='Output Directory')
parser.add_argument('-i', type=str, required=True,
help='T_Coffee Directory containing aa based .score_ascii files')
parser.add_argument('-n', type=str, required=True,
help='Directory containing tranalign nucleotide alignments')
# parser.add_argument('-p', type=str, required=True,
# help='Priorities File for Staphylinidae')
args = parser.parse_args()
print("Starting Staphylinidae Ortholog Bait Design".format(args.o))
print(args.o, args.i, args.n)
dict_of_max_sums = longest_exon_length(args.i)
sum_file = write_sums(args.o, dict_of_max_sums)
blocks_dir = extract_conserved_blocks(sum_file, args.n, args.o)
window_ranges = [600]
for window in window_ranges:
filtered_blocks_dir = filter_blocks(blocks_dir, args.o, window)
processed_blocks_dir = filtered_blocks_dir
# Original was going to stagger tile the baits, but bait manufacturer inherently does this
# tiled_blocks_dir = tile_blocks(filtered_blocks_dir, args.o, window)
# processed_blocks_dir = tiled_blocks_dir
merge_baits(processed_blocks_dir, args.o, "Elateridae", window)
def extract_conserved_blocks(sum_file, alignment_directory, results_directory):
"""
Takes an AA T_coffee alignment score_ascii file, the corresponding nt fasta tranalign file, and the sum file to
Extract out a conserved block
:param sum_file:
:param alignment_directory:
:param results_directory:
:return: Output Directory of conserved blocks
"""
output_directory = os.path.join(results_directory, "conserved_blocks")
if not os.path.exists(output_directory):
os.makedirs(output_directory)
with open(sum_file) as f:
lines = f.readlines()
lines.pop(0)
for line in lines:
list_of_seqs = []
split = line.rstrip().split(",")
name = split[0].replace(".aa.summarized.score_ascii", "_tranaligned.fa")
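# window length and start index come from the AA alignment, so multiply by 3 for nucleotide coordinates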
window_range = int(split[2])*3
index = int(split[3])*3
file_path = os.path.join(alignment_directory, name)
if os.path.isfile(file_path):
with open(file_path) as g:
alignments = AlignIO.read(g, "fasta")
for alignment in alignments:
list_of_seqs.append(alignment[index:index + window_range])
orthogroup = split[0].split(".")[0]
file_name = "{}_block.fasta".format(orthogroup)
file_path = os.path.join(output_directory, file_name)
with open(file_path, "w") as h:
for seq in list_of_seqs:
h.write(seq.format("fasta"))
return output_directory
def longest_exon_length(directory):
"""
Scans t_coffee alignments in score_ascii format for a highly conserved window of 150 or 200 positions
and keeps the best-scoring window found for each orthogroup
:param directory: Directory of T_coffee results (containing score_ascii and aln files)
:return: Dictionary mapping each score_ascii file to its best (sum, window, index) tuple, where the TCS sum reaches at least 95% of the window maximum
"""
increments = [150, 200]
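# candidate conserved-window lengths in AA positions; the reversed copy tries the longest window first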
increments_rev = increments[::-1]
dict_of_max_sums = {}
files = glob.glob(os.path.join(directory, "*.score_ascii"))
count = 0
for file in files:
count += 1
if count % 100 == 0:
print(count)
# Scans an alignment and converts the cons string of numbers into a continous list of numbers
number_string = ""
with open(file) as f:
number_of_specimens = f.read().count(":") - 4
f.seek(0)
if number_of_specimens < 5:
print("Skipping {} Due to Low Specimen Count".format(file))
continue
for line in f:
if line.startswith("cons") and ":" not in line:
number = line.rstrip().split(" ")[-1]
number_string += number
number_list = [int(i) for i in number_string]
# Scans number list for sequence containing the highest window range of conserved bases within 95% of max
# TCS score for said window range aka 9*Window Range
# Sort the list so the highest score block within the window range is first. If the window range
# has 95% quality or higher, add it to dictionary and move on to next file, otherwise decrease
# window range and try again
for window_range in increments_rev:
list_of_sums = []
if len(number_list) > window_range:
for i in range(0, len(number_list) - window_range):
the_sum = sum(number_list[i:i + window_range])
list_of_sums.append((the_sum, window_range, i))
sorted_list = sorted(list_of_sums, reverse=True, key=lambda element: (element[0]))
if float(sorted_list[0][0]) >= float(9 * window_range * .95):
if os.path.basename(file) not in dict_of_max_sums:
dict_of_max_sums[os.path.basename(file)] = sorted_list[0]
break
return dict_of_max_sums
def write_sums(directory, dict_of_max_sums):
"""
Writes the dictionary of all ortholog T_coffee scores/sums to csv file
:param directory:
:param dict_of_max_sums:
:return:
"""
if not os.path.exists(directory):
os.makedirs(directory)
timestr = time.strftime("%Y%m%d-%H%M%S")
file_name = "Conserved_Exons_Sums_{}.csv".format(timestr)
file_path = os.path.join(directory, file_name)
# Sorts dictionary into a list by score sum and then window length
sorted_x = sorted(dict_of_max_sums.items(), reverse=True, key=lambda x: (x[1][0], x[1][1]))
print("Writing T_Coffee score analysis to {}".format(file_path))
with open(file_path, "w") as f:
f.write("Orthogroup,Sum,Window,Index\n")
for entry in sorted_x:
f.write("{},{},{},{}\n".format(entry[0], entry[1][0], entry[1][1], entry[1][2]))
return file_path
def filter_blocks(directory, results_dir, window):
"""
Filters blocks generated by longest exon length and write sum functions based on various criteria
:param directory: Directory of fasta blocks to filter
:param results_dir: Parent Result Folder
:param window: Minimum length of a conserved block in basepairs
:return: Output Directory of filtered blocks
"""
fastas = glob.glob(os.path.join(directory, "*.fasta"))
output_dir = os.path.join(results_dir, "filtered_blocks_{}".format(window))
if not os.path.exists(output_dir):
os.mkdir(output_dir)
total_seq_length = 0
total_after_gap_removal = 0
total_sequences = 0
gene_count = 0
    # For each block/file extract out sequences that meet the following criteria:
# Part of Priority List = 1
# Minimum Length of Window size in basepairs
# Gaps represent less than 20% of sequence
    # Block contains at least 5 sequences from priority list = 1
for fasta in fastas:
seqs = []
with open(fasta) as f:
file_name = os.path.basename(fasta).replace(".fasta", "_filtered.fasta")
for seq in SeqIO.parse(f, 'fasta'):
gaps = seq.seq.count("-")
gap_percent = float(gaps / len(seq.seq))
if gap_percent > 0.20:
pass
else:
if len(seq.seq) >= window:
seqs.append(seq)
if len(seqs) < 5:
pass
else:
gene_count += 1
# Randomly take 3 contigs from the bait set to ensure even distribution of species across all orthologs
random.shuffle(seqs)
seqs = seqs[:3]
total_sequences += len(seqs)
for seq in seqs:
total_seq_length += len(seq.seq)
seq.seq = seq.seq.ungap(gap="-")
total_after_gap_removal += len(seq.seq)
new_file = os.path.join(output_dir, file_name)
with open(new_file, "w") as g:
SeqIO.write(seqs, g, "fasta")
print("Total Genes: {}, "
"Total Sequences: {}, "
"Total Length in bp: {}, "
"After Gap Removal: {}".format(gene_count, total_sequences, total_seq_length, total_after_gap_removal))
return output_dir
def tile_blocks(directory, results_dir, window):
"""
Takes a prefiltered block generated by the filtered_blocks function and tiles each bait
The first 0, 40 or 80 basepairs of each sequence are removed so the baits tile amongst each other
:param directory:
:param results_dir:
:param window:
:return:
"""
fastas = glob.glob(os.path.join(directory, "*.fasta"))
output_dir = os.path.join(results_dir, "tiled_blocks_{}".format(window))
if not os.path.exists(output_dir):
os.mkdir(output_dir)
for fasta in fastas:
seqs = []
with open(fasta) as f:
count = 0
for seq in SeqIO.parse(f, 'fasta'):
seq.description = ""
# Remove the first 0, 40 or 80 basepairs of the sequence every 3rd time
count += 1
if count == 1:
pass
if count == 2:
seq.seq = seq.seq[40:]
if count == 3:
seq.seq = seq.seq[80:]
count = 0
seqs.append(seq)
file_name = os.path.basename(fasta).replace("_block_filtered", "_block_tiled")
new_file = os.path.join(output_dir, file_name)
with open(new_file, "w") as g:
SeqIO.write(seqs, g, "fasta")
return output_dir
def merge_baits(directory, results_dir, prefix, window):
"""
Merges multifastas in the input directory into a single multi fasta file. Can be accomplished with bash cat, but
using biopython ensures each fasta entry is formatted correctly
:param directory: Input directory of fastas
:param results_dir: Output Parent directory
:param prefix: Name of the output file
:param window:
:return:
"""
output_dir = os.path.join(results_dir, "final_baits")
if not os.path.exists(output_dir):
os.mkdir(output_dir)
fastas = glob.glob(os.path.join(directory, "*.fasta"))
seqs = []
total_dna = 0
total_seqs = 0
total_orthologs = 0
for fasta in fastas:
if total_dna > 3900000:
break
total_orthologs += 1
with open(fasta) as f:
for seq in SeqIO.parse(f, 'fasta'):
total_seqs += 1
total_dna += len(seq.seq)
seq.description = ""
seqs.append(seq)
file_name = "{}-{}-final-baits.fasta".format(prefix, window)
new_file = os.path.join(output_dir, file_name)
print("Bait File {} "
"with Total Orthologs {}, "
"Total Seqs {}, Total_Dna {} bp".format(new_file, total_orthologs, total_seqs, total_dna))
with open(new_file, "w") as g:
SeqIO.write(seqs, g, "fasta")
return output_dir
if __name__ == "__main__":
main()
| <filename>elateridae_baits.py
# coding: utf8
"""
Ortholog Based Bait Design Script for creating Elateridae ortholog based baits suitable for submission to myBaits
Compares t_coffee AA alignment scores with nucleotide tranalignments to find conserved blocks
Author <NAME> <EMAIL>
License: MIT
Copyright: Government of Canada
"""
import glob
import os
from Bio import AlignIO, SeqIO
import time
import argparse
import random
def main():
"""
Main Function to run Staphylinidae Bait Designer
:return:
"""
parser = argparse.ArgumentParser(description='Processes T_Coffee AA alignments to generate a ortholog bait set')
parser.add_argument('-o', type=str, required=True,
help='Output Directory')
parser.add_argument('-i', type=str, required=True,
help='T_Coffee Directory containing aa based .score_ascii files')
parser.add_argument('-n', type=str, required=True,
help='Directory containing tranalign nucleotide alignments')
# parser.add_argument('-p', type=str, required=True,
# help='Priorities File for Staphylinidae')
args = parser.parse_args()
print("Starting Staphylinidae Ortholog Bait Design".format(args.o))
print(args.o, args.i, args.n)
dict_of_max_sums = longest_exon_length(args.i)
sum_file = write_sums(args.o, dict_of_max_sums)
blocks_dir = extract_conserved_blocks(sum_file, args.n, args.o)
window_ranges = [600]
for window in window_ranges:
filtered_blocks_dir = filter_blocks(blocks_dir, args.o, window)
processed_blocks_dir = filtered_blocks_dir
# Original was going to stagger tile the baits, but bait manufacturer inherently does this
# tiled_blocks_dir = tile_blocks(filtered_blocks_dir, args.o, window)
# processed_blocks_dir = tiled_blocks_dir
merge_baits(processed_blocks_dir, args.o, "Elateridae", window)
def extract_conserved_blocks(sum_file, alignment_directory, results_directory):
"""
Takes an AA T_coffee alignment score_ascii file, the corresponding nt fasta tranalign file, and the sum file to
Extract out a conserved block
:param sum_file:
:param alignment_directory:
:param results_directory:
:return: Output Directory of conserved blocks
"""
output_directory = os.path.join(results_directory, "conserved_blocks")
if not os.path.exists(output_directory):
os.makedirs(output_directory)
with open(sum_file) as f:
lines = f.readlines()
lines.pop(0)
for line in lines:
list_of_seqs = []
split = line.rstrip().split(",")
name = split[0].replace(".aa.summarized.score_ascii", "_tranaligned.fa")
window_range = int(split[2])*3
index = int(split[3])*3
file_path = os.path.join(alignment_directory, name)
if os.path.isfile(file_path):
with open(file_path) as g:
alignments = AlignIO.read(g, "fasta")
for alignment in alignments:
list_of_seqs.append(alignment[index:index + window_range])
orthogroup = split[0].split(".")[0]
file_name = "{}_block.fasta".format(orthogroup)
file_path = os.path.join(output_directory, file_name)
with open(file_path, "w") as h:
for seq in list_of_seqs:
h.write(seq.format("fasta"))
return output_directory
def longest_exon_length(directory):
"""
Scans t_coffee alignments in score_ascii format for a region of between 75-2000 positions in length that is
highly conserved, and sorts by the degree of conservation into an output file
:param directory: Directory of T_coffee results (containing score_ascii and aln files)
    :return: Dictionary of Orthogroups with a 300bp region whose TCS scores are above 2400
"""
increments = [150, 200]
increments_rev = increments[::-1]
dict_of_max_sums = {}
files = glob.glob(os.path.join(directory, "*.score_ascii"))
count = 0
for file in files:
count += 1
if count % 100 == 0:
print(count)
        # Scans an alignment and converts the cons string of numbers into a continuous list of numbers
number_string = ""
with open(file) as f:
number_of_specimens = f.read().count(":") - 4
f.seek(0)
if number_of_specimens < 5:
print("Skipping {} Due to Low Specimen Count".format(file))
continue
for line in f:
if line.startswith("cons") and ":" not in line:
number = line.rstrip().split(" ")[-1]
number_string += number
number_list = [int(i) for i in number_string]
        # Scan the number list for the window whose summed TCS score is within 95% of the maximum possible
        # TCS score for that window length (i.e. 9 * window_range)
        # Sort so the highest-scoring block for the current window range comes first. If it meets the 95%
        # threshold, add it to the dictionary and move on to the next file; otherwise decrease the
        # window range and try again
for window_range in increments_rev:
list_of_sums = []
if len(number_list) > window_range:
for i in range(0, len(number_list) - window_range):
the_sum = sum(number_list[i:i + window_range])
list_of_sums.append((the_sum, window_range, i))
sorted_list = sorted(list_of_sums, reverse=True, key=lambda element: (element[0]))
if float(sorted_list[0][0]) >= float(9 * window_range * .95):
if os.path.basename(file) not in dict_of_max_sums:
dict_of_max_sums[os.path.basename(file)] = sorted_list[0]
break
return dict_of_max_sums
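# Editor-added illustrative sketch (an assumption, not part of the original pipeline): it shows, on a
# toy cons string, the same sliding-window TCS sum that longest_exon_length() applies to each
# score_ascii file; the helper name and the example values are hypothetical.
def _window_sum_example(number_string="999888999", window_range=3):
    number_list = [int(i) for i in number_string]
    list_of_sums = [(sum(number_list[i:i + window_range]), window_range, i)
                    for i in range(0, len(number_list) - window_range)]
    best = sorted(list_of_sums, reverse=True, key=lambda element: element[0])[0]
    # A block qualifies when it reaches 95% of the maximum possible score (9 per alignment column)
    return best, float(best[0]) >= float(9 * window_range * .95)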
def write_sums(directory, dict_of_max_sums):
"""
Writes the dictionary of all ortholog T_coffee scores/sums to csv file
:param directory:
:param dict_of_max_sums:
:return:
"""
if not os.path.exists(directory):
os.makedirs(directory)
timestr = time.strftime("%Y%m%d-%H%M%S")
file_name = "Conserved_Exons_Sums_{}.csv".format(timestr)
file_path = os.path.join(directory, file_name)
# Sorts dictionary into a list by score sum and then window length
sorted_x = sorted(dict_of_max_sums.items(), reverse=True, key=lambda x: (x[1][0], x[1][1]))
print("Writing T_Coffee score analysis to {}".format(file_path))
with open(file_path, "w") as f:
f.write("Orthogroup,Sum,Window,Index\n")
for entry in sorted_x:
f.write("{},{},{},{}\n".format(entry[0], entry[1][0], entry[1][1], entry[1][2]))
return file_path
def filter_blocks(directory, results_dir, window):
"""
Filters blocks generated by longest exon length and write sum functions based on various criteria
:param directory: Directory of fasta blocks to filter
:param results_dir: Parent Result Folder
:param window: Minimum length of a conserved block in basepairs
:return: Output Directory of filtered blocks
"""
fastas = glob.glob(os.path.join(directory, "*.fasta"))
output_dir = os.path.join(results_dir, "filtered_blocks_{}".format(window))
if not os.path.exists(output_dir):
os.mkdir(output_dir)
total_seq_length = 0
total_after_gap_removal = 0
total_sequences = 0
gene_count = 0
    # For each block/file extract out sequences that meet the following criteria:
# Part of Priority List = 1
# Minimum Length of Window size in basepairs
# Gaps represent less than 20% of sequence
    # Block contains at least 5 sequences from priority list = 1
for fasta in fastas:
seqs = []
with open(fasta) as f:
file_name = os.path.basename(fasta).replace(".fasta", "_filtered.fasta")
for seq in SeqIO.parse(f, 'fasta'):
gaps = seq.seq.count("-")
gap_percent = float(gaps / len(seq.seq))
if gap_percent > 0.20:
pass
else:
if len(seq.seq) >= window:
seqs.append(seq)
if len(seqs) < 5:
pass
else:
gene_count += 1
# Randomly take 3 contigs from the bait set to ensure even distribution of species across all orthologs
random.shuffle(seqs)
seqs = seqs[:3]
total_sequences += len(seqs)
for seq in seqs:
total_seq_length += len(seq.seq)
seq.seq = seq.seq.ungap(gap="-")
total_after_gap_removal += len(seq.seq)
new_file = os.path.join(output_dir, file_name)
with open(new_file, "w") as g:
SeqIO.write(seqs, g, "fasta")
print("Total Genes: {}, "
"Total Sequences: {}, "
"Total Length in bp: {}, "
"After Gap Removal: {}".format(gene_count, total_sequences, total_seq_length, total_after_gap_removal))
return output_dir
def tile_blocks(directory, results_dir, window):
"""
Takes a prefiltered block generated by the filtered_blocks function and tiles each bait
The first 0, 40 or 80 basepairs of each sequence are removed so the baits tile amongst each other
:param directory:
:param results_dir:
:param window:
:return:
"""
fastas = glob.glob(os.path.join(directory, "*.fasta"))
output_dir = os.path.join(results_dir, "tiled_blocks_{}".format(window))
if not os.path.exists(output_dir):
os.mkdir(output_dir)
for fasta in fastas:
seqs = []
with open(fasta) as f:
count = 0
for seq in SeqIO.parse(f, 'fasta'):
seq.description = ""
# Remove the first 0, 40 or 80 basepairs of the sequence every 3rd time
count += 1
if count == 1:
pass
if count == 2:
seq.seq = seq.seq[40:]
if count == 3:
seq.seq = seq.seq[80:]
count = 0
seqs.append(seq)
file_name = os.path.basename(fasta).replace("_block_filtered", "_block_tiled")
new_file = os.path.join(output_dir, file_name)
with open(new_file, "w") as g:
SeqIO.write(seqs, g, "fasta")
return output_dir
def merge_baits(directory, results_dir, prefix, window):
"""
Merges multifastas in the input directory into a single multi fasta file. Can be accomplished with bash cat, but
using biopython ensures each fasta entry is formatted correctly
:param directory: Input directory of fastas
:param results_dir: Output Parent directory
:param prefix: Name of the output file
:param window:
:return:
"""
output_dir = os.path.join(results_dir, "final_baits")
if not os.path.exists(output_dir):
os.mkdir(output_dir)
fastas = glob.glob(os.path.join(directory, "*.fasta"))
seqs = []
total_dna = 0
total_seqs = 0
total_orthologs = 0
for fasta in fastas:
if total_dna > 3900000:
break
total_orthologs += 1
with open(fasta) as f:
for seq in SeqIO.parse(f, 'fasta'):
total_seqs += 1
total_dna += len(seq.seq)
seq.description = ""
seqs.append(seq)
file_name = "{}-{}-final-baits.fasta".format(prefix, window)
new_file = os.path.join(output_dir, file_name)
print("Bait File {} "
"with Total Orthologs {}, "
"Total Seqs {}, Total_Dna {} bp".format(new_file, total_orthologs, total_seqs, total_dna))
with open(new_file, "w") as g:
SeqIO.write(seqs, g, "fasta")
return output_dir
if __name__ == "__main__":
main()
| en | 0.816006 | # coding: utf8 Ortholog Based Bait Design Script for creating Elateridae ortholog based baits suitable submission to myBaits Compares t_coffee AA alignment scores with nucleotide tranalignments to find conserved blocks Author <NAME> <EMAIL> License: MIT Copywright: Government of Canada Main Function to run Staphylinidae Bait Designer :return: # parser.add_argument('-p', type=str, required=True, # help='Priorities File for Staphylinidae') # Original was going to stagger tile the baits, but bait manufacturer inherently does this # tiled_blocks_dir = tile_blocks(filtered_blocks_dir, args.o, window) # processed_blocks_dir = tiled_blocks_dir Takes an AA T_coffee alignment score_ascii file, the corresponding nt fasta tranalign file, and the sum file to Extract out a conserved block :param sum_file: :param alignment_directory: :param results_directory: :return: Output Directory of conserved blocks Scans t_coffee alignments in score_ascii format for a region of between 75-2000 positions in length that is highly conserved, and sorts by the degree of conservation into an output file :param directory: Directory of T_coffee results (containing score_ascii and aln files) :return: Dictionary of Orthogroups with a 300bp region TCS scores above 2400 # Scans an alignment and converts the cons string of numbers into a continous list of numbers # Scans number list for sequence containing the highest window range of conserved bases within 95% of max # TCS score for said window range aka 9*Window Range # Sort the list so the highest score block within the window range is first. If the window range # has 95% quality or higher, add it to dictionary and move on to next file, otherwise decrease # window range and try again Writes the dictionary of all ortholog T_coffee scores/sums to csv file :param directory: :param dict_of_max_sums: :return: # Sorts dictionary into a list by score sum and then window length Filters blocks generated by longest exon length and write sum functions based on various criteria :param directory: Directory of fasta blocks to filter :param results_dir: Parent Result Folder :param window: Minimum length of a conserved block in basepairs :return: Output Directory of filtered blocks # For each block/file extract out sequences that meet the following critiera: # Part of Priority List = 1 # Minimum Length of Window size in basepairs # Gaps represent less than 20% of sequence # Block contains atleast 5 sequences from priority list = 1 # Randomly take 3 contigs from the bait set to ensure even distribution of species across all orthologs Takes a prefiltered block generated by the filtered_blocks function and tiles each bait The first 0, 40 or 80 basepairs of each sequence are removed so the baits tile amongst each other :param directory: :param results_dir: :param window: :return: # Remove the first 0, 40 or 80 basepairs of the sequence every 3rd time Merges multifastas in the input directory into a single multi fasta file. Can be accomplished with bash cat, but using biopython ensures each fasta entry is formatted correctly :param directory: Input directory of fastas :param results_dir: Output Parent directory :param prefix: Name of the output file :param window: :return: | 2.793746 | 3 |
poilab.py | octeufer/Annotate_Optimize | 0 | 10491 | <filename>poilab.py
import sys
import numpy as np
sys.path.append("d:/data/annooptimize")
import triangle
import time
tinternal = list()
def labstart():
points,tri = triangle.gentri("d:/data/annooptimize/Annodata/200600/poise.shp")
plabels = triangle.dynamicSize(points)
conflictg = triangle.conflictgraphdy(points,tri,plabels)
acg = triangle.accesssubg(conflictg)
len(acg)
allsolve = np.zeros((len(points),4,2),np.float64)
points2,tri2 = triangle.gentri("d:/data/annooptimize/Annodata/200600/POIhalf.shp")
plabels2 = triangle.dynamicSize(points2)
conflictg2 = triangle.conflictgraphdy(points2,tri2,plabels2)
acg2 = triangle.accesssubg(conflictg2)
points3,tri3 = triangle.gentri("d:/data/annooptimize/Annodata/200600/POIall.shp")
plabels3 = triangle.dynamicSize(points3)
conflictg3 = triangle.conflictgraphdy(points3,tri3,plabels3)
acg3 = triangle.accesssubg(conflictg3)
    # time.clock() was removed in Python 3.8; use perf_counter() and record cumulative elapsed time
    t_start = time.perf_counter()
    costs, tabucs = triangle.globaltabuiter2dy(acg, points, 1, plabels)
    tinternal.append(time.perf_counter() - t_start)
    costs2, tabucs2 = triangle.globaltabuiter2dy(acg2, points2, 1, plabels2)
    tinternal.append(time.perf_counter() - t_start)
    costs3, tabucs3 = triangle.globaltabuiter2dy(acg3, points3, 1, plabels3)
    tinternal.append(time.perf_counter() - t_start)
return tinternal,(costs,tabucs),(costs2,tabucs2),(costs3,tabucs3)
| <filename>poilab.py
import sys
import numpy as np
sys.path.append("d:/data/annooptimize")
import triangle
import time
tinternal = list()
def labstart():
points,tri = triangle.gentri("d:/data/annooptimize/Annodata/200600/poise.shp")
plabels = triangle.dynamicSize(points)
conflictg = triangle.conflictgraphdy(points,tri,plabels)
acg = triangle.accesssubg(conflictg)
len(acg)
allsolve = np.zeros((len(points),4,2),np.float64)
points2,tri2 = triangle.gentri("d:/data/annooptimize/Annodata/200600/POIhalf.shp")
plabels2 = triangle.dynamicSize(points2)
conflictg2 = triangle.conflictgraphdy(points2,tri2,plabels2)
acg2 = triangle.accesssubg(conflictg2)
points3,tri3 = triangle.gentri("d:/data/annooptimize/Annodata/200600/POIall.shp")
plabels3 = triangle.dynamicSize(points3)
conflictg3 = triangle.conflictgraphdy(points3,tri3,plabels3)
acg3 = triangle.accesssubg(conflictg3)
    # time.clock() was removed in Python 3.8; use perf_counter() and record cumulative elapsed time
    t_start = time.perf_counter()
    costs, tabucs = triangle.globaltabuiter2dy(acg, points, 1, plabels)
    tinternal.append(time.perf_counter() - t_start)
    costs2, tabucs2 = triangle.globaltabuiter2dy(acg2, points2, 1, plabels2)
    tinternal.append(time.perf_counter() - t_start)
    costs3, tabucs3 = triangle.globaltabuiter2dy(acg3, points3, 1, plabels3)
    tinternal.append(time.perf_counter() - t_start)
return tinternal,(costs,tabucs),(costs2,tabucs2),(costs3,tabucs3)
| none | 1 | 2.497932 | 2 |
|
t_core/tc_python/xrule.py | levilucio/SyVOLT | 3 | 10492 |
from util.infinity import INFINITY
from tc_python.arule import ARule
from t_core.rollbacker import Rollbacker
from t_core.resolver import Resolver
class XRule(ARule):
'''
Applies the transformation on one match with roll-back capability.
'''
def __init__(self, LHS, RHS, max_iterations=INFINITY):
'''
Applies the transformation on one match with roll-back capability.
@param LHS: The pre-condition pattern (LHS + NACs).
@param RHS: The post-condition pattern (RHS).
'''
# external_matches_only=True because further matches of this rule are only processed after a roll-back
super(XRule, self).__init__(LHS, RHS)
self.M.max = max_iterations
self.I.max_iterations = max_iterations
self.B = Rollbacker(condition=LHS, max_iterations=max_iterations)
def packet_in(self, packet):
self.exception = None
self.is_success = False
# Checkpoint the original packet
self.B.packet_in(packet)
if not self.B.is_success:
self.exception = self.B.exception
return packet
# Match
packet = self.M.packet_in(packet)
if not self.M.is_success:
packet = self.B.restore(packet)
if self.M.exception:
self.exception = self.M.exception
elif self.B.exception:
self.exception = self.B.exception
return packet
# Choose one match
packet = self.I.packet_in(packet)
if not self.I.is_success:
packet = self.B.restore(packet)
if self.I.exception:
self.exception = self.I.exception
elif self.B.exception:
self.exception = self.B.exception
return packet
# Rewrite
packet = self.W.packet_in(packet)
if not self.W.is_success:
packet = self.B.restore(packet)
if self.W.exception:
self.exception = self.W.exception
elif self.B.exception:
self.exception = self.B.exception
return packet
self.is_success = True
return packet
def next_in(self, packet):
self.exception = None
self.is_success = False
packet = self.B.next_in(packet)
if not self.B.is_success:
self.exception = self.B.exception
return packet
# Choose the next match
packet = self.I.packet_in(packet)
if not self.I.is_success:
packet = self.B.next_in(packet)
if self.I.exception:
self.exception = self.I.exception
elif self.B.exception:
self.exception = self.B.exception
return packet
# Rewrite
packet = self.W.packet_in(packet)
if not self.W.is_success:
packet = self.B.next_in(packet)
if self.W.exception:
self.exception = self.W.exception
elif self.B.exception:
self.exception = self.B.exception
return packet
# Output success packet
self.is_success = True
return packet
class XRule_r(XRule):
'''
Applies the transformation on one match with roll-back capability.
'''
def __init__(self, LHS, RHS, external_matches_only=False, custom_resolution=lambda packet: False):
'''
Applies the transformation on one match with roll-back capability.
@param LHS: The pre-condition pattern (LHS + NACs).
@param RHS: The post-condition pattern (RHS).
@param external_matches_only: Resolve conflicts ignoring the matches found in this ARule.
@param custom_resolution: Override the default resolution function.
'''
        # Forward the patterns to XRule so the match/rewrite/rollback primitives are configured
        super(XRule_r, self).__init__(LHS, RHS)
self.R = Resolver(external_matches_only=external_matches_only,
custom_resolution=custom_resolution)
def packet_in(self, packet):
packet = super(XRule_r, self).packet_in(packet)
# is_success is True
if self.exception is None:
# Resolve any conflicts if necessary
packet = self.R.packet_in(packet)
if not self.R.is_success:
self.exception = self.R.exception
return packet
# Output success packet
else:
self.is_success = False
return packet
def next_in(self, packet):
packet = super(XRule_r, self).next_in(packet)
# is_success is True
if self.exception is None:
# Resolve any conflicts if necessary
packet = self.R.packet_in(packet)
if not self.R.is_success:
self.exception = self.R.exception
return packet
# Output success packet
else:
self.is_success = False
return packet
|
from util.infinity import INFINITY
from tc_python.arule import ARule
from t_core.rollbacker import Rollbacker
from t_core.resolver import Resolver
class XRule(ARule):
'''
Applies the transformation on one match with roll-back capability.
'''
def __init__(self, LHS, RHS, max_iterations=INFINITY):
'''
Applies the transformation on one match with roll-back capability.
@param LHS: The pre-condition pattern (LHS + NACs).
@param RHS: The post-condition pattern (RHS).
'''
# external_matches_only=True because further matches of this rule are only processed after a roll-back
super(XRule, self).__init__(LHS, RHS)
self.M.max = max_iterations
self.I.max_iterations = max_iterations
self.B = Rollbacker(condition=LHS, max_iterations=max_iterations)
def packet_in(self, packet):
self.exception = None
self.is_success = False
# Checkpoint the original packet
self.B.packet_in(packet)
if not self.B.is_success:
self.exception = self.B.exception
return packet
# Match
packet = self.M.packet_in(packet)
if not self.M.is_success:
packet = self.B.restore(packet)
if self.M.exception:
self.exception = self.M.exception
elif self.B.exception:
self.exception = self.B.exception
return packet
# Choose one match
packet = self.I.packet_in(packet)
if not self.I.is_success:
packet = self.B.restore(packet)
if self.I.exception:
self.exception = self.I.exception
elif self.B.exception:
self.exception = self.B.exception
return packet
# Rewrite
packet = self.W.packet_in(packet)
if not self.W.is_success:
packet = self.B.restore(packet)
if self.W.exception:
self.exception = self.W.exception
elif self.B.exception:
self.exception = self.B.exception
return packet
self.is_success = True
return packet
def next_in(self, packet):
self.exception = None
self.is_success = False
packet = self.B.next_in(packet)
if not self.B.is_success:
self.exception = self.B.exception
return packet
# Choose the next match
packet = self.I.packet_in(packet)
if not self.I.is_success:
packet = self.B.next_in(packet)
if self.I.exception:
self.exception = self.I.exception
elif self.B.exception:
self.exception = self.B.exception
return packet
# Rewrite
packet = self.W.packet_in(packet)
if not self.W.is_success:
packet = self.B.next_in(packet)
if self.W.exception:
self.exception = self.W.exception
elif self.B.exception:
self.exception = self.B.exception
return packet
# Output success packet
self.is_success = True
return packet
class XRule_r(XRule):
'''
Applies the transformation on one match with roll-back capability.
'''
def __init__(self, LHS, RHS, external_matches_only=False, custom_resolution=lambda packet: False):
'''
Applies the transformation on one match with roll-back capability.
@param LHS: The pre-condition pattern (LHS + NACs).
@param RHS: The post-condition pattern (RHS).
@param external_matches_only: Resolve conflicts ignoring the matches found in this ARule.
@param custom_resolution: Override the default resolution function.
'''
        # Forward the patterns to XRule so the match/rewrite/rollback primitives are configured
        super(XRule_r, self).__init__(LHS, RHS)
self.R = Resolver(external_matches_only=external_matches_only,
custom_resolution=custom_resolution)
def packet_in(self, packet):
packet = super(XRule_r, self).packet_in(packet)
# is_success is True
if self.exception is None:
# Resolve any conflicts if necessary
packet = self.R.packet_in(packet)
if not self.R.is_success:
self.exception = self.R.exception
return packet
# Output success packet
else:
self.is_success = False
return packet
def next_in(self, packet):
packet = super(XRule_r, self).next_in(packet)
# is_success is True
if self.exception is None:
# Resolve any conflicts if necessary
packet = self.R.packet_in(packet)
if not self.R.is_success:
self.exception = self.R.exception
return packet
# Output success packet
else:
self.is_success = False
return packet
| en | 0.726417 | Applies the transformation on one match with roll-back capability. Applies the transformation on one match with roll-back capability.
@param LHS: The pre-condition pattern (LHS + NACs).
@param RHS: The post-condition pattern (RHS). # external_matches_only=True because further matches of this rule are only processed after a roll-back # Checkpoint the original packet # Match # Choose one match # Rewrite # Choose the next match # Rewrite # Output success packet Applies the transformation on one match with roll-back capability. Applies the transformation on one match with roll-back capability.
@param LHS: The pre-condition pattern (LHS + NACs).
@param RHS: The post-condition pattern (RHS).
@param external_matches_only: Resolve conflicts ignoring the matches found in this ARule.
@param custom_resolution: Override the default resolution function. # is_success is True # Resolve any conflicts if necessary # Output success packet # is_success is True # Resolve any conflicts if necessary # Output success packet | 2.289623 | 2 |
tests/commonsense/semantic_lexicon_knowledge/ai2_lexicon_test.py | keisks/propara | 84 | 10493 | <reponame>keisks/propara<gh_stars>10-100
from unittest import TestCase
from propara.commonsense.semantic_lexicon_knowledge.ai2_lexicon import AI2Lexicon, AI2LexiconPredicate, AI2LexiconArg, AI2LexiconIndications, \
AI2LexiconPattern
class TestAI2Lexicon(TestCase):
def setUp(self):
self.lexicon_fp = "tests/fixtures/ie/TheSemanticLexicon-v3.0_withadj.tsv"
def testLoads(self):
self.lexicon = AI2Lexicon(self.lexicon_fp)
# print(f"evaporate.subj: {self.lexicon.what_happens_to_subj('evaporate', has_agent=True, has_patient=False)}")
# print(f"evaporate.obj: {self.lexicon.what_happens_to_obj('evaporate', has_agent=True, has_patient=False)}")
#
# print(f"evaporate.subj: {self.lexicon.what_happens_to_subj('evaporate')}")
# print(f"evaporate.obj: {self.lexicon.what_happens_to_obj('evaporate')}")
# v2 doesn't contain size, temperature, phase attributes
# infile = "tests/fixtures/ie/ai2-lexicon-v2.tsv"
# the following path is useful when debugging from browser.
# self.lexicon = AI2Lexicon("tests/fixtures/ie/TheSemanticLexicon-v3.0_withadj.tsv")
assert self.lexicon._after_subj(("blend in", AI2LexiconPattern.SO)) == {
AI2LexiconPredicate.IS_AT: AI2LexiconArg.OBJECT,
AI2LexiconPredicate.NOT_IS_AT: AI2LexiconArg.PREP_SRC,
}
assert self.lexicon._after_obj(("absorb", AI2LexiconPattern.SO))[
AI2LexiconPredicate.IS_AT] == AI2LexiconArg.SUBJECT
# assert self.lexicon._after_obj(("absorbs", AI2LexiconPattern.SO)).get(AI2LexiconPredicate.IS_AT, "") == AI2LexiconArg.SUBJECT
assert len(self.lexicon._after_obj(("blend in", AI2LexiconPattern.SO))) == 0
assert len(self.lexicon._after_obj(("blend blend2", AI2LexiconPattern.SO))) == 0
assert AI2LexiconIndications.MOVED not in self.lexicon.what_happens_to_subj("absorbs")
assert AI2LexiconIndications.MOVED in self.lexicon.what_happens_to_obj("absorbs")
assert AI2LexiconIndications.CREATED in self.lexicon.what_happens_to_obj("sprout")
assert AI2LexiconIndications.CREATED in self.lexicon.what_happens_to_subj("sprout", has_agent=True,
has_patient=False)
assert AI2LexiconIndications.DESTROYED not in self.lexicon.what_happens_to_subj("sprout")
assert AI2LexiconIndications.DESTROYED not in self.lexicon.what_happens_to_obj("sprout")
assert AI2LexiconIndications.TEMPERATURE_INC not in self.lexicon.what_happens_to_obj("turn")
assert AI2LexiconIndications.TEMPERATURE_INC in self.lexicon.what_happens_to_subj("gets hot")
assert AI2LexiconIndications.SIZE_INC in self.lexicon.what_happens_to_subj("gets bigger")
assert AI2LexiconIndications.SIZE_INC in self.lexicon.what_happens_to_subj("become bigger")
assert AI2LexiconIndications.SIZE_INC in self.lexicon.what_happens_to_subj("turned bigger")
assert AI2LexiconIndications.SIZE_INC not in self.lexicon.what_happens_to_obj("turns into bigger")
assert AI2LexiconIndications.MOVED not in self.lexicon.what_happens_to_subj("turned")
assert AI2LexiconIndications.PHASE_UNK_GAS in self.lexicon.what_happens_to_subj("turned gaseous")
assert AI2LexiconIndications.PHASE_LIQUID_SOLID in self.lexicon.what_happens_to_subj("solidify", has_agent=True,
has_patient=False)
assert AI2LexiconIndications.PHASE_LIQUID_SOLID in self.lexicon.what_happens_to_obj("solidify", has_agent=True,
has_patient=True)
assert AI2LexiconIndications.PHASE_UNK_SOLID not in self.lexicon.what_happens_to_subj("solidifies")
assert AI2LexiconIndications.PHASE_SOLID_GAS in self.lexicon.what_happens_to_subj("sublime", has_agent=True,
has_patient=False)
assert AI2LexiconIndications.PHASE_SOLID_GAS in self.lexicon.what_happens_to_obj("sublime", has_agent=True,
has_patient=True)
# if agent and patient both are present or only 1
# the difference is whether object is given or not
# this happens for all verbs that can be both transitive/intransitive
# they will have 2 entries.
#
# A big rock stops the stream of water from uphill => stream of water moved from uphill to rock
# car stops at the intersection ==> car moved to intersection
# we have removed lots of fine details in the patterns (VerbNet had much more info there)
# if agent and patient both are present or only 1
def test_type_of_pattern(self):
input = "SUBJECT VERB OBJECT PREP-SRC PREP-DEST"
assert AI2Lexicon.type_of_pattern(input) == AI2LexiconPattern.SO
input = "SUBJECT VERB OBJECT"
assert AI2Lexicon.type_of_pattern(input) == AI2LexiconPattern.SO
input = "SUBJECT VERB PREP-SRC PREP-DEST"
assert AI2Lexicon.type_of_pattern(input) == AI2LexiconPattern.S
| from unittest import TestCase
from propara.commonsense.semantic_lexicon_knowledge.ai2_lexicon import AI2Lexicon, AI2LexiconPredicate, AI2LexiconArg, AI2LexiconIndications, \
AI2LexiconPattern
class TestAI2Lexicon(TestCase):
def setUp(self):
self.lexicon_fp = "tests/fixtures/ie/TheSemanticLexicon-v3.0_withadj.tsv"
def testLoads(self):
self.lexicon = AI2Lexicon(self.lexicon_fp)
# print(f"evaporate.subj: {self.lexicon.what_happens_to_subj('evaporate', has_agent=True, has_patient=False)}")
# print(f"evaporate.obj: {self.lexicon.what_happens_to_obj('evaporate', has_agent=True, has_patient=False)}")
#
# print(f"evaporate.subj: {self.lexicon.what_happens_to_subj('evaporate')}")
# print(f"evaporate.obj: {self.lexicon.what_happens_to_obj('evaporate')}")
# v2 doesn't contain size, temperature, phase attributes
# infile = "tests/fixtures/ie/ai2-lexicon-v2.tsv"
# the following path is useful when debugging from browser.
# self.lexicon = AI2Lexicon("tests/fixtures/ie/TheSemanticLexicon-v3.0_withadj.tsv")
assert self.lexicon._after_subj(("blend in", AI2LexiconPattern.SO)) == {
AI2LexiconPredicate.IS_AT: AI2LexiconArg.OBJECT,
AI2LexiconPredicate.NOT_IS_AT: AI2LexiconArg.PREP_SRC,
}
assert self.lexicon._after_obj(("absorb", AI2LexiconPattern.SO))[
AI2LexiconPredicate.IS_AT] == AI2LexiconArg.SUBJECT
# assert self.lexicon._after_obj(("absorbs", AI2LexiconPattern.SO)).get(AI2LexiconPredicate.IS_AT, "") == AI2LexiconArg.SUBJECT
assert len(self.lexicon._after_obj(("blend in", AI2LexiconPattern.SO))) == 0
assert len(self.lexicon._after_obj(("blend blend2", AI2LexiconPattern.SO))) == 0
assert AI2LexiconIndications.MOVED not in self.lexicon.what_happens_to_subj("absorbs")
assert AI2LexiconIndications.MOVED in self.lexicon.what_happens_to_obj("absorbs")
assert AI2LexiconIndications.CREATED in self.lexicon.what_happens_to_obj("sprout")
assert AI2LexiconIndications.CREATED in self.lexicon.what_happens_to_subj("sprout", has_agent=True,
has_patient=False)
assert AI2LexiconIndications.DESTROYED not in self.lexicon.what_happens_to_subj("sprout")
assert AI2LexiconIndications.DESTROYED not in self.lexicon.what_happens_to_obj("sprout")
assert AI2LexiconIndications.TEMPERATURE_INC not in self.lexicon.what_happens_to_obj("turn")
assert AI2LexiconIndications.TEMPERATURE_INC in self.lexicon.what_happens_to_subj("gets hot")
assert AI2LexiconIndications.SIZE_INC in self.lexicon.what_happens_to_subj("gets bigger")
assert AI2LexiconIndications.SIZE_INC in self.lexicon.what_happens_to_subj("become bigger")
assert AI2LexiconIndications.SIZE_INC in self.lexicon.what_happens_to_subj("turned bigger")
assert AI2LexiconIndications.SIZE_INC not in self.lexicon.what_happens_to_obj("turns into bigger")
assert AI2LexiconIndications.MOVED not in self.lexicon.what_happens_to_subj("turned")
assert AI2LexiconIndications.PHASE_UNK_GAS in self.lexicon.what_happens_to_subj("turned gaseous")
assert AI2LexiconIndications.PHASE_LIQUID_SOLID in self.lexicon.what_happens_to_subj("solidify", has_agent=True,
has_patient=False)
assert AI2LexiconIndications.PHASE_LIQUID_SOLID in self.lexicon.what_happens_to_obj("solidify", has_agent=True,
has_patient=True)
assert AI2LexiconIndications.PHASE_UNK_SOLID not in self.lexicon.what_happens_to_subj("solidifies")
assert AI2LexiconIndications.PHASE_SOLID_GAS in self.lexicon.what_happens_to_subj("sublime", has_agent=True,
has_patient=False)
assert AI2LexiconIndications.PHASE_SOLID_GAS in self.lexicon.what_happens_to_obj("sublime", has_agent=True,
has_patient=True)
# if agent and patient both are present or only 1
# the difference is whether object is given or not
# this happens for all verbs that can be both transitive/intransitive
# they will have 2 entries.
#
# A big rock stops the stream of water from uphill => stream of water moved from uphill to rock
# car stops at the intersection ==> car moved to intersection
# we have removed lots of fine details in the patterns (VerbNet had much more info there)
# if agent and patient both are present or only 1
def test_type_of_pattern(self):
input = "SUBJECT VERB OBJECT PREP-SRC PREP-DEST"
assert AI2Lexicon.type_of_pattern(input) == AI2LexiconPattern.SO
input = "SUBJECT VERB OBJECT"
assert AI2Lexicon.type_of_pattern(input) == AI2LexiconPattern.SO
input = "SUBJECT VERB PREP-SRC PREP-DEST"
assert AI2Lexicon.type_of_pattern(input) == AI2LexiconPattern.S | en | 0.775765 | # print(f"evaporate.subj: {self.lexicon.what_happens_to_subj('evaporate', has_agent=True, has_patient=False)}") # print(f"evaporate.obj: {self.lexicon.what_happens_to_obj('evaporate', has_agent=True, has_patient=False)}") # # print(f"evaporate.subj: {self.lexicon.what_happens_to_subj('evaporate')}") # print(f"evaporate.obj: {self.lexicon.what_happens_to_obj('evaporate')}") # v2 doesn't contain size, temperature, phase attributes # infile = "tests/fixtures/ie/ai2-lexicon-v2.tsv" # the following path is useful when debugging from browser. # self.lexicon = AI2Lexicon("tests/fixtures/ie/TheSemanticLexicon-v3.0_withadj.tsv") # assert self.lexicon._after_obj(("absorbs", AI2LexiconPattern.SO)).get(AI2LexiconPredicate.IS_AT, "") == AI2LexiconArg.SUBJECT # if agent and patient both are present or only 1 # the difference is whether object is given or not # this happens for all verbs that can be both transitive/intransitive # they will have 2 entries. # # A big rock stops the stream of water from uphill => stream of water moved from uphill to rock # car stops at the intersection ==> car moved to intersection # we have removed lots of fine details in the patterns (VerbNet had much more info there) # if agent and patient both are present or only 1 | 2.593966 | 3 |
fitbit/__init__.py | erichilarysmithsr/python-fitbit | 0 | 10494 | # -*- coding: utf-8 -*-
"""
Fitbit API Library
------------------
:copyright: 2012-2015 ORCAS.
:license: BSD, see LICENSE for more details.
"""
from .api import Fitbit, FitbitOauthClient, FitbitOauth2Client
# Meta.
__title__ = 'fitbit'
__author__ = '<NAME> and ORCAS'
__author_email__ = '<EMAIL>'
__copyright__ = 'Copyright 2012-2015 ORCAS'
__license__ = 'Apache 2.0'
__version__ = '0.1.3'
__release__ = '0.1.3'
# Module namespace.
all_tests = []
| # -*- coding: utf-8 -*-
"""
Fitbit API Library
------------------
:copyright: 2012-2015 ORCAS.
:license: BSD, see LICENSE for more details.
"""
from .api import Fitbit, FitbitOauthClient, FitbitOauth2Client
# Meta.
__title__ = 'fitbit'
__author__ = '<NAME> and ORCAS'
__author_email__ = '<EMAIL>'
__copyright__ = 'Copyright 2012-2015 ORCAS'
__license__ = 'Apache 2.0'
__version__ = '0.1.3'
__release__ = '0.1.3'
# Module namespace.
all_tests = []
| en | 0.358271 | # -*- coding: utf-8 -*- Fitbit API Library ------------------ :copyright: 2012-2015 ORCAS. :license: BSD, see LICENSE for more details. # Meta. # Module namespace. | 1.103585 | 1 |
bitcoinExchange/exchange/api/urls.py | pogginicolo98/start2impact_exchange | 1 | 10495 | <reponame>pogginicolo98/start2impact_exchange
from django.urls import include, path
from exchange.api.views import LatestOrdersListAPIView, OrderViewSet, ProfileAPIView
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register(r'orders', OrderViewSet, basename='orders')
urlpatterns = [
path('profile/', ProfileAPIView.as_view(), name='profile-detail'),
path('orders/latest/', LatestOrdersListAPIView.as_view(), name='orders-latest'),
path('', include(router.urls))
]
| from django.urls import include, path
from exchange.api.views import LatestOrdersListAPIView, OrderViewSet, ProfileAPIView
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register(r'orders', OrderViewSet, basename='orders')
urlpatterns = [
path('profile/', ProfileAPIView.as_view(), name='profile-detail'),
path('orders/latest/', LatestOrdersListAPIView.as_view(), name='orders-latest'),
path('', include(router.urls))
] | none | 1 | 1.979356 | 2 |
|
python/testData/formatter/indentInGenerator_after.py | jnthn/intellij-community | 2 | 10496 | def dbl():
return (
(a, a) for a in [])
| def dbl():
return (
(a, a) for a in [])
| none | 1 | 1.655636 | 2 |
|
kevin/aggregate/process_html.py | toddoh/thisisallabout_backend | 0 | 10497 | from bs4 import BeautifulSoup
import requests
import re
def retrieveText():
print("Parsing text from online target")
url = "https://www.whitehouse.gov/the-press-office/2017/10/16/remarks-president-trump-and-senate-majority-leader-mitch-mcconnell-joint"
response = requests.get(url)
soup = BeautifulSoup(response.content, "lxml")
textwrapper = soup.find("div", { "class" : "field-item" })
textel = textwrapper.find_all("p", { "class" : None })
textstripped = []
for element in textel:
stripped = element.text.replace("\r", "\n").replace("\r", "").replace("\n", "").replace("Q ", "0002reporter: ").replace("THE PRESIDENT: ", "0001president: ").strip()
if "P.M." not in stripped and "A.M." not in stripped:
textstripped.append(stripped)
# print(textstripped)
return textstripped | from bs4 import BeautifulSoup
import requests
import re
def retrieveText():
print("Parsing text from online target")
url = "https://www.whitehouse.gov/the-press-office/2017/10/16/remarks-president-trump-and-senate-majority-leader-mitch-mcconnell-joint"
response = requests.get(url)
soup = BeautifulSoup(response.content, "lxml")
textwrapper = soup.find("div", { "class" : "field-item" })
textel = textwrapper.find_all("p", { "class" : None })
textstripped = []
for element in textel:
stripped = element.text.replace("\r", "\n").replace("\r", "").replace("\n", "").replace("Q ", "0002reporter: ").replace("THE PRESIDENT: ", "0001president: ").strip()
if "P.M." not in stripped and "A.M." not in stripped:
textstripped.append(stripped)
# print(textstripped)
return textstripped | en | 0.396912 | # print(textstripped) | 3.478917 | 3 |
cfmacro/_resources/examples/lambda.py | gchiesa/cfmacro | 0 | 10498 | <reponame>gchiesa/cfmacro<filename>cfmacro/_resources/examples/lambda.py
# -*- coding: utf-8 -*-
from cfmacro.processors import SgProcessor
from cfmacro.core.engine import ProcessorEngine
from cfmacro.core.template import TemplateProcessor
def lambda_handler(event, context):
"""
Implement a core handler for security groups ingress / egress
:param event:
:param context:
:return:
"""
print(f'event received: {event}')
processor_engine = ProcessorEngine()
processor_engine.register_processor(SgProcessor)
template_processor = TemplateProcessor(processor_engine)
result = template_processor.process(fragment=event['fragment'],
template_params=event['templateParameterValues']).to_dict()
print(f'event processed. Result: \n{result}')
return {
"requestId": event['requestId'],
"status": "success",
"fragment": result
}
| # -*- coding: utf-8 -*-
from cfmacro.processors import SgProcessor
from cfmacro.core.engine import ProcessorEngine
from cfmacro.core.template import TemplateProcessor
def lambda_handler(event, context):
"""
Implement a core handler for security groups ingress / egress
:param event:
:param context:
:return:
"""
print(f'event received: {event}')
processor_engine = ProcessorEngine()
processor_engine.register_processor(SgProcessor)
template_processor = TemplateProcessor(processor_engine)
result = template_processor.process(fragment=event['fragment'],
template_params=event['templateParameterValues']).to_dict()
print(f'event processed. Result: \n{result}')
return {
"requestId": event['requestId'],
"status": "success",
"fragment": result
} | en | 0.649537 | # -*- coding: utf-8 -*- Implement a core handler for security groups ingress / egress :param event: :param context: :return: | 1.929878 | 2 |
tf2qa/predict_long.py | mikelkl/TF2-QA | 17 | 10499 | <reponame>mikelkl/TF2-QA<gh_stars>10-100
import torch
import argparse
from roberta_modeling import RobertaJointForLong
from transformers.modeling_roberta import RobertaConfig, RobertaModel
from torch.utils.data import TensorDataset, SequentialSampler, DataLoader
import utils
from tqdm import tqdm
import os
import json
import collections
import pickle
import pandas as pd
from utils_nq import read_candidates_from_one_split, compute_long_pred
from roberta_long_preprocess import InputLongFeatures
RawResult = collections.namedtuple("RawResult",
["unique_id",
"long_start_logits",
"long_end_logits"])
def load_cached_data(feature_dir, output_features=False, evaluate=False):
features = torch.load(feature_dir)
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
if evaluate:
all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index)
else:
all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long)
all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_start_positions, all_end_positions)
if output_features:
return dataset, features
return dataset
def to_list(tensor):
return tensor.detach().cpu().tolist()
def make_submission(output_prediction_file, output_dir):
print("***** Making submmision *****")
test_answers_df = pd.read_json(output_prediction_file)
def create_short_answer(entry):
"""
:param entry: dict
:return: str
"""
if entry['answer_type'] == 0:
return ""
# if entry["short_answers_score"] < 1.5:
# return ""
if entry["yes_no_answer"] != "NONE":
return entry["yes_no_answer"]
answer = []
for short_answer in entry["short_answers"]:
if short_answer["start_token"] > -1:
answer.append(str(short_answer["start_token"]) + ":" + str(short_answer["end_token"]))
return " ".join(answer)
def create_long_answer(entry):
if entry['answer_type'] == 0:
return ''
# if entry["long_answer_score"] < 1.5:
# return ""
answer = []
if entry["long_answer"]["start_token"] > -1:
answer.append(str(entry["long_answer"]["start_token"]) + ":" + str(entry["long_answer"]["end_token"]))
return " ".join(answer)
for var_name in ['long_answer_score', 'short_answers_score', 'answer_type']:
test_answers_df[var_name] = test_answers_df['predictions'].apply(lambda q: q[var_name])
test_answers_df["long_answer"] = test_answers_df["predictions"].apply(create_long_answer)
test_answers_df["short_answer"] = test_answers_df["predictions"].apply(create_short_answer)
test_answers_df["example_id"] = test_answers_df["predictions"].apply(lambda q: str(q["example_id"]))
long_answers = dict(zip(test_answers_df["example_id"], test_answers_df["long_answer"]))
short_answers = dict(zip(test_answers_df["example_id"], test_answers_df["short_answer"]))
sample_submission = pd.read_csv("data/sample_submission.csv")
long_prediction_strings = sample_submission[sample_submission["example_id"].str.contains("_long")].apply(
lambda q: long_answers[q["example_id"].replace("_long", "")], axis=1)
short_prediction_strings = sample_submission[sample_submission["example_id"].str.contains("_short")].apply(
lambda q: short_answers[q["example_id"].replace("_short", "")], axis=1)
sample_submission.loc[
sample_submission["example_id"].str.contains("_long"), "PredictionString"] = long_prediction_strings
sample_submission.loc[
sample_submission["example_id"].str.contains("_short"), "PredictionString"] = short_prediction_strings
sample_submission.to_csv(os.path.join(output_dir, "submission.csv"), index=False)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--gpu_ids", default="0,1,2,3,4,5,6,7", type=str)
parser.add_argument("--eval_batch_size", default=128, type=int)
parser.add_argument("--n_best_size", default=20, type=int)
parser.add_argument("--max_answer_length", default=30, type=int)
parser.add_argument("--float16", default=True, type=bool)
parser.add_argument("--bert_config_file", default='roberta_large/config.json', type=str)
parser.add_argument("--init_restore_dir", default='check_points/roberta-large-long-V00/best_checkpoint.pth', type=str)
parser.add_argument("--predict_file", default='data/simplified-nq-test.jsonl', type=str)
parser.add_argument("--output_dir", default='check_points/roberta-large-long-V00',
type=str)
parser.add_argument("--predict_feat", default='dataset/test_data_maxlen512_roberta_tfidf_features.bin',
type=str)
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_ids
device = torch.device("cuda")
n_gpu = torch.cuda.device_count()
print("device %s n_gpu %d" % (device, n_gpu))
print("device: {} n_gpu: {} 16-bits training: {}".format(device, n_gpu, args.float16))
bert_config = RobertaConfig.from_json_file(args.bert_config_file)
model = RobertaJointForLong(RobertaModel(bert_config), bert_config)
utils.torch_show_all_params(model)
utils.torch_init_model(model, args.init_restore_dir)
if args.float16:
model.half()
model.to(device)
if n_gpu > 1:
model = torch.nn.DataParallel(model)
dataset, features = load_cached_data(feature_dir=args.predict_feat, output_features=True, evaluate=True)
eval_sampler = SequentialSampler(dataset)
eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Eval!
print("***** Running evaluation *****")
print(" Num examples =", len(dataset))
print(" Batch size =", args.eval_batch_size)
all_results = []
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(device) for t in batch)
with torch.no_grad():
input_ids, input_mask, segment_ids, example_indices = batch
inputs = {'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': segment_ids}
start_logits, end_logits = model(**inputs)
for i, example_index in enumerate(example_indices):
eval_feature = features[example_index.item()]
unique_id = str(eval_feature.unique_id)
result = RawResult(unique_id=unique_id,
long_start_logits=start_logits[i].cpu().numpy(),
long_end_logits=end_logits[i].cpu().numpy())
all_results.append(result)
pickle.dump(all_results, open(os.path.join(args.output_dir, 'RawResults_test.pkl'), 'wb'))
# all_results = pickle.load(open(os.path.join(args.output_dir, 'RawResults_test.pkl'), 'rb'))
print("Going to candidates file")
candidates_dict = read_candidates_from_one_split(args.predict_file)
print("Compute_pred_dict")
nq_pred_dict = compute_long_pred(candidates_dict, features, all_results, args.n_best_size)
output_prediction_file = os.path.join(args.output_dir, 'test_predictions.json')
print("Saving predictions to", output_prediction_file)
with open(output_prediction_file, 'w') as f:
json.dump({'predictions': list(nq_pred_dict.values())}, f)
# make_submission(output_prediction_file, args.output_dir)
| import torch
import argparse
from roberta_modeling import RobertaJointForLong
from transformers.modeling_roberta import RobertaConfig, RobertaModel
from torch.utils.data import TensorDataset, SequentialSampler, DataLoader
import utils
from tqdm import tqdm
import os
import json
import collections
import pickle
import pandas as pd
from utils_nq import read_candidates_from_one_split, compute_long_pred
from roberta_long_preprocess import InputLongFeatures
RawResult = collections.namedtuple("RawResult",
["unique_id",
"long_start_logits",
"long_end_logits"])
def load_cached_data(feature_dir, output_features=False, evaluate=False):
features = torch.load(feature_dir)
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
if evaluate:
all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index)
else:
all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long)
all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_start_positions, all_end_positions)
if output_features:
return dataset, features
return dataset
def to_list(tensor):
return tensor.detach().cpu().tolist()
def make_submission(output_prediction_file, output_dir):
print("***** Making submmision *****")
test_answers_df = pd.read_json(output_prediction_file)
def create_short_answer(entry):
"""
:param entry: dict
:return: str
"""
if entry['answer_type'] == 0:
return ""
# if entry["short_answers_score"] < 1.5:
# return ""
if entry["yes_no_answer"] != "NONE":
return entry["yes_no_answer"]
answer = []
for short_answer in entry["short_answers"]:
if short_answer["start_token"] > -1:
answer.append(str(short_answer["start_token"]) + ":" + str(short_answer["end_token"]))
return " ".join(answer)
def create_long_answer(entry):
if entry['answer_type'] == 0:
return ''
# if entry["long_answer_score"] < 1.5:
# return ""
answer = []
if entry["long_answer"]["start_token"] > -1:
answer.append(str(entry["long_answer"]["start_token"]) + ":" + str(entry["long_answer"]["end_token"]))
return " ".join(answer)
for var_name in ['long_answer_score', 'short_answers_score', 'answer_type']:
test_answers_df[var_name] = test_answers_df['predictions'].apply(lambda q: q[var_name])
test_answers_df["long_answer"] = test_answers_df["predictions"].apply(create_long_answer)
test_answers_df["short_answer"] = test_answers_df["predictions"].apply(create_short_answer)
test_answers_df["example_id"] = test_answers_df["predictions"].apply(lambda q: str(q["example_id"]))
long_answers = dict(zip(test_answers_df["example_id"], test_answers_df["long_answer"]))
short_answers = dict(zip(test_answers_df["example_id"], test_answers_df["short_answer"]))
sample_submission = pd.read_csv("data/sample_submission.csv")
long_prediction_strings = sample_submission[sample_submission["example_id"].str.contains("_long")].apply(
lambda q: long_answers[q["example_id"].replace("_long", "")], axis=1)
short_prediction_strings = sample_submission[sample_submission["example_id"].str.contains("_short")].apply(
lambda q: short_answers[q["example_id"].replace("_short", "")], axis=1)
sample_submission.loc[
sample_submission["example_id"].str.contains("_long"), "PredictionString"] = long_prediction_strings
sample_submission.loc[
sample_submission["example_id"].str.contains("_short"), "PredictionString"] = short_prediction_strings
sample_submission.to_csv(os.path.join(output_dir, "submission.csv"), index=False)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--gpu_ids", default="0,1,2,3,4,5,6,7", type=str)
parser.add_argument("--eval_batch_size", default=128, type=int)
parser.add_argument("--n_best_size", default=20, type=int)
parser.add_argument("--max_answer_length", default=30, type=int)
parser.add_argument("--float16", default=True, type=bool)
parser.add_argument("--bert_config_file", default='roberta_large/config.json', type=str)
parser.add_argument("--init_restore_dir", default='check_points/roberta-large-long-V00/best_checkpoint.pth', type=str)
parser.add_argument("--predict_file", default='data/simplified-nq-test.jsonl', type=str)
parser.add_argument("--output_dir", default='check_points/roberta-large-long-V00',
type=str)
parser.add_argument("--predict_feat", default='dataset/test_data_maxlen512_roberta_tfidf_features.bin',
type=str)
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_ids
device = torch.device("cuda")
n_gpu = torch.cuda.device_count()
print("device %s n_gpu %d" % (device, n_gpu))
print("device: {} n_gpu: {} 16-bits training: {}".format(device, n_gpu, args.float16))
bert_config = RobertaConfig.from_json_file(args.bert_config_file)
model = RobertaJointForLong(RobertaModel(bert_config), bert_config)
utils.torch_show_all_params(model)
utils.torch_init_model(model, args.init_restore_dir)
if args.float16:
model.half()
model.to(device)
if n_gpu > 1:
model = torch.nn.DataParallel(model)
dataset, features = load_cached_data(feature_dir=args.predict_feat, output_features=True, evaluate=True)
eval_sampler = SequentialSampler(dataset)
eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Eval!
print("***** Running evaluation *****")
print(" Num examples =", len(dataset))
print(" Batch size =", args.eval_batch_size)
all_results = []
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(device) for t in batch)
with torch.no_grad():
input_ids, input_mask, segment_ids, example_indices = batch
inputs = {'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': segment_ids}
start_logits, end_logits = model(**inputs)
for i, example_index in enumerate(example_indices):
eval_feature = features[example_index.item()]
unique_id = str(eval_feature.unique_id)
result = RawResult(unique_id=unique_id,
long_start_logits=start_logits[i].cpu().numpy(),
long_end_logits=end_logits[i].cpu().numpy())
all_results.append(result)
pickle.dump(all_results, open(os.path.join(args.output_dir, 'RawResults_test.pkl'), 'wb'))
# all_results = pickle.load(open(os.path.join(args.output_dir, 'RawResults_test.pkl'), 'rb'))
print("Going to candidates file")
candidates_dict = read_candidates_from_one_split(args.predict_file)
print("Compute_pred_dict")
nq_pred_dict = compute_long_pred(candidates_dict, features, all_results, args.n_best_size)
output_prediction_file = os.path.join(args.output_dir, 'test_predictions.json')
print("Saving predictions to", output_prediction_file)
with open(output_prediction_file, 'w') as f:
json.dump({'predictions': list(nq_pred_dict.values())}, f)
# make_submission(output_prediction_file, args.output_dir) | en | 0.302535 | # Convert to Tensors and build dataset :param entry: dict :return: str # if entry["short_answers_score"] < 1.5: # return "" # if entry["long_answer_score"] < 1.5: # return "" # Eval! # all_results = pickle.load(open(os.path.join(args.output_dir, 'RawResults_test.pkl'), 'rb')) # make_submission(output_prediction_file, args.output_dir) | 2.117392 | 2 |